Dataset schema (column: type, value range; ⌀ marks nullable columns):
- hexsha: string, length 40
- size: int64, 3 to 1.03M
- ext: string, 10 classes
- lang: string, 1 class
- max_stars_repo_path: string, length 3 to 972
- max_stars_repo_name: string, length 6 to 130
- max_stars_repo_head_hexsha: string, length 40 to 78
- max_stars_repo_licenses: sequence, length 1 to 10
- max_stars_count: int64, 1 to 191k (⌀)
- max_stars_repo_stars_event_min_datetime: string, length 24 (⌀)
- max_stars_repo_stars_event_max_datetime: string, length 24 (⌀)
- max_issues_repo_path: string, length 3 to 972
- max_issues_repo_name: string, length 6 to 130
- max_issues_repo_head_hexsha: string, length 40 to 78
- max_issues_repo_licenses: sequence, length 1 to 10
- max_issues_count: int64, 1 to 116k (⌀)
- max_issues_repo_issues_event_min_datetime: string, length 24 (⌀)
- max_issues_repo_issues_event_max_datetime: string, length 24 (⌀)
- max_forks_repo_path: string, length 3 to 972
- max_forks_repo_name: string, length 6 to 130
- max_forks_repo_head_hexsha: string, length 40 to 78
- max_forks_repo_licenses: sequence, length 1 to 10
- max_forks_count: int64, 1 to 105k (⌀)
- max_forks_repo_forks_event_min_datetime: string, length 24 (⌀)
- max_forks_repo_forks_event_max_datetime: string, length 24 (⌀)
- content: string, length 3 to 1.03M
- avg_line_length: float64, 1.13 to 941k
- max_line_length: int64, 2 to 941k
- alphanum_fraction: float64, 0 to 1

The records below follow this schema; fields are pipe-separated and the content field holds the raw file text.
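For orientation, a minimal sketch of how rows with this schema could be read with the Hugging Face datasets library; the dataset identifier is a placeholder, not taken from this dump.

# Illustrative only: the dataset path below is hypothetical.
from datasets import load_dataset

ds = load_dataset("username/python-code-dump", split="train", streaming=True)
for row in ds:
    # Each row carries the columns listed above, including the repo path and raw file text.
    print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
    print(row["content"][:200])  # first 200 characters of the file
    break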
b54b8e260fd3d24d705553f40c1dc5044170b381 | 197 | py | Python | reports/admin.py | thepylot/Django-Import-Export | 210d3a742a4be93b78b1b67d68f025c725447e09 | ["MIT"] | 7 | 2019-12-17T02:39:05.000Z | 2021-06-07T21:41:29.000Z | reports/admin.py | thepylot/Django-Import-Export | 210d3a742a4be93b78b1b67d68f025c725447e09 | ["MIT"] | 1 | 2020-07-14T08:40:59.000Z | 2020-07-14T08:40:59.000Z | reports/admin.py | raszidzie/Django-Import-Export | 210d3a742a4be93b78b1b67d68f025c725447e09 | ["MIT"] | 4 | 2020-02-04T03:46:05.000Z | 2020-12-11T02:44:22.000Z |
from import_export.admin import ImportExportModelAdmin
from django.contrib import admin
from .models import Employee
@admin.register(Employee)
class EmployeeAdmin(ImportExportModelAdmin):
    pass
| 28.142857 | 54 | 0.847716 |
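The row above registers an Employee model that is not included in the snippet; a minimal sketch of what such a model might look like follows (an assumption for illustration, not part of the dataset row).

# Hypothetical reports/models.py matching the admin registration above.
from django.db import models

class Employee(models.Model):
    name = models.CharField(max_length=100)
    hired_on = models.DateField(null=True, blank=True)

    def __str__(self):
        return self.name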
d790dd9a150832fb4059e48735b74214bccade6f | 1,670 | py | Python | Section 4/face_datasets.py | PacktPublishing/Hands-On-Python-Deep-Learning | 7562dce2f7a5f60623e68536b47a67f3c56c8445 | ["MIT"] | 10 | 2019-01-30T07:49:30.000Z | 2021-12-19T02:17:40.000Z | Section 4/face_datasets.py | PacktPublishing/Hands-On-Python-Deep-Learning | 7562dce2f7a5f60623e68536b47a67f3c56c8445 | ["MIT"] | null | null | null | Section 4/face_datasets.py | PacktPublishing/Hands-On-Python-Deep-Learning | 7562dce2f7a5f60623e68536b47a67f3c56c8445 | ["MIT"] | 6 | 2019-01-30T01:59:23.000Z | 2021-12-19T20:14:57.000Z |
# Import OpenCV2 for image processing
import cv2
import os

def assure_path_exists(path):
    dir = os.path.dirname(path)
    if not os.path.exists(dir):
        os.makedirs(dir)

# Start capturing video
vid_cam = cv2.VideoCapture(0)
# Detect object in video stream using Haarcascade Frontal Face
face_detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# For each person, one face id
face_id = 1
# Initialize sample face image
count = 0
assure_path_exists("dataset/")

# Start looping
while(True):
    # Capture video frame
    _, image_frame = vid_cam.read()
    # Convert frame to grayscale
    gray = cv2.cvtColor(image_frame, cv2.COLOR_BGR2GRAY)
    # Detect frames of different sizes, list of faces rectangles
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    # Loops for each faces
    for (x, y, w, h) in faces:
        # Crop the image frame into rectangle
        cv2.rectangle(image_frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
        # Increment sample face image
        count += 1
        # Save the captured image into the datasets folder
        cv2.imwrite("dataset/User." + str(face_id) + '.' + str(count) + ".jpg", gray[y:y+h, x:x+w])
        # Display the video frame, with bounded rectangle on the person's face
        cv2.imshow('frame', image_frame)
    # To stop taking video, press 'q' for at least 100ms
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
    # If image taken reach 100, stop taking video
    elif count > 100:
        break

# Stop video
vid_cam.release()
# Close all started windows
cv2.destroyAllWindows()
| 25.30303 | 99 | 0.642515 |
f16223156d168d8225f1b2c13617660a28c36ef0 | 169 | py | Python | frontendWeb/COVID/urls.py | Anthony-ferrari/OSU_Hackathon_Brute_Force | 42bae7b960bd860d5065dbec782c51dc845eafa9 | ["MIT"] | 1 | 2020-03-28T20:27:29.000Z | 2020-03-28T20:27:29.000Z | frontendWeb/COVID/urls.py | Anthony-ferrari/OSU_Hackathon_Brute_Force | 42bae7b960bd860d5065dbec782c51dc845eafa9 | ["MIT"] | null | null | null | frontendWeb/COVID/urls.py | Anthony-ferrari/OSU_Hackathon_Brute_Force | 42bae7b960bd860d5065dbec782c51dc845eafa9 | ["MIT"] | 1 | 2020-03-28T08:29:55.000Z | 2020-03-28T08:29:55.000Z |
from django.urls import path
from . import views
urlpatterns = [
    path('', views.home, name='COVID19-home'),
    path('about/', views.about, name='COVID19-about'),
]
| 24.142857 | 54 | 0.668639 |
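The URL configuration above wires up two views that are not part of this row; a minimal sketch of what they might look like (the template names are assumptions):

# Hypothetical COVID/views.py providing the two views referenced above.
from django.shortcuts import render

def home(request):
    return render(request, 'COVID/home.html')

def about(request):
    return render(request, 'COVID/about.html')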
7a01ce3af54125757b65f9d451e74ff8f2d0fded | 7,764 | py | Python | docs/conf.py | royaso/hovercraft | 2ca3e8cfd00f5e28077d37bf142e1efd55a63df3 | ["MIT"] | 1 | 2017-09-27T07:05:09.000Z | 2017-09-27T07:05:09.000Z | docs/conf.py | azmodie/hovercraft | ad382d68e2fc0da5cccc079b76954593bf1e71e1 | ["CC0-1.0"] | null | null | null | docs/conf.py | azmodie/hovercraft | ad382d68e2fc0da5cccc079b76954593bf1e71e1 | ["CC0-1.0"] | null | null | null |
# -*- coding: utf-8 -*-
#
# Hovercraft! documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 7 20:44:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hovercraft!'
copyright = u'2013, Lennart Regebro'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Hovercraftdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Hovercraft.tex', u'Hovercraft! Documentation',
u'Lennart Regebro', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hovercraft', u'Hovercraft! Documentation',
[u'Lennart Regebro'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Hovercraft', u'Hovercraft! Documentation',
u'Lennart Regebro', 'Hovercraft', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 31.950617 | 80 | 0.714451 |
d641e8bbca93ee1d3332392b8e0f2d1deceb513b | 12,555 | py | Python | core/storage/auth/gae_models.py | mridul-netizen/oppia | ed1afe89b0971ed23ee29e1c30901ae6211d3d5a | ["Apache-2.0"] | null | null | null | core/storage/auth/gae_models.py | mridul-netizen/oppia | ed1afe89b0971ed23ee29e1c30901ae6211d3d5a | ["Apache-2.0"] | null | null | null | core/storage/auth/gae_models.py | mridul-netizen/oppia | ed1afe89b0971ed23ee29e1c30901ae6211d3d5a | ["Apache-2.0"] | 1 | 2020-12-09T21:33:49.000Z | 2020-12-09T21:33:49.000Z |
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for managing user authentication."""
from __future__ import absolute_import
from __future__ import unicode_literals
from core.platform import models
import feconf
from typing import Dict, List, Optional, Text, cast # isort:skip # pylint: disable=unused-import
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import base_models
from mypy_imports import datastore_services
from mypy_imports import user_models
base_models, user_models = models.Registry.import_models(
[models.NAMES.base_model, models.NAMES.user])
datastore_services = models.Registry.import_datastore_services()
ONLY_FIREBASE_SEED_MODEL_ID = '1'
class UserAuthDetailsModel(base_models.BaseModel):
"""Stores the authentication details for a particular user.
Instances of this class are keyed by user id.
"""
# Authentication identifier from Google AppEngine (GAE). Exists only for
# full users. None for profile users.
gae_id = datastore_services.StringProperty(indexed=True)
# Authentication identifier from the Firebase authentication server.
firebase_auth_id = datastore_services.StringProperty(indexed=True)
# For profile users, the user ID of the full user associated with them.
# None for full users. Required for profiles because gae_id/firebase_auth_id
# attribute is None for them, hence this attribute stores their association
# with a full user who do have a gae_id/firebase_auth_id.
parent_user_id = (
datastore_services.StringProperty(indexed=True, default=None))
@staticmethod
def get_deletion_policy():
# type: () -> base_models.DELETION_POLICY
"""Model contains data to delete corresponding to a user: id, gae_id,
firebase_auth_id, and parent_user_id fields.
"""
return base_models.DELETION_POLICY.DELETE_AT_END
@staticmethod
def get_model_association_to_user():
# type: () -> base_models.MODEL_ASSOCIATION_TO_USER
"""Currently, the model holds authentication details relevant only for
backend. Currently the only relevant user data is the username of the
parent.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.ONE_INSTANCE_PER_USER
@staticmethod
def get_field_names_for_takeout():
# type: () -> Dict[Text, Text]
"""We do not want to export the internal user id for the parent, so we
export the username instead.
"""
return {
'parent_user_id': 'parent_username'
}
@classmethod
def get_export_policy(cls):
# type: () -> Dict[Text, base_models.EXPORT_POLICY]
"""Model doesn't contain any data directly corresponding to a user.
Currently, the model holds authentication details relevant only for
backend, and no exportable user data. It may contain user data in the
future.
"""
return dict(super(cls, cls).get_export_policy(), **{
'gae_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'firebase_auth_id': base_models.EXPORT_POLICY.NOT_APPLICABLE,
'parent_user_id': base_models.EXPORT_POLICY.EXPORTED
})
@classmethod
def export_data(cls, user_id):
# type: (Text) -> Dict[Text, Text]
"""Exports the username of the parent."""
user_auth_model = cls.get(user_id, strict=False)
if user_auth_model and user_auth_model.parent_user_id:
parent_data = user_models.UserSettingsModel.get(
user_auth_model.parent_user_id)
parent_username = parent_data.username
return {'parent_username': parent_username}
else:
return {}
@classmethod
def apply_deletion_policy(cls, user_id):
# type: (Text) -> None
"""Delete instances of UserAuthDetailsModel for the user.
Args:
user_id: str. The ID of the user whose data should be deleted.
"""
cls.delete_by_id(user_id)
@classmethod
def has_reference_to_user_id(cls, user_id):
# type: (Text) -> bool
"""Check whether UserAuthDetailsModel exists for the given user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any UserAuthDetailsModel refers to the given user ID.
"""
return cls.get_by_id(user_id) is not None
@classmethod
def get_by_auth_id(cls, provider_id, auth_id):
# type: (Text, Text) -> Optional[UserAuthDetailsModel]
"""Fetch a user entry by auth_id of a particular auth service.
Args:
provider_id: str. Name of the provider of the auth ID.
auth_id: str. Authentication detail corresponding to the
authentication provider.
Returns:
UserAuthDetailsModel. The UserAuthDetailsModel instance having a
particular user mapped to the given auth_id and the auth provider
if there exists one, else None.
"""
if provider_id == feconf.GAE_AUTH_PROVIDER_ID:
model = cls.query(cls.gae_id == auth_id).get()
elif provider_id == feconf.FIREBASE_AUTH_PROVIDER_ID:
model = cls.query(cls.firebase_auth_id == auth_id).get()
else:
return None
return cast(Optional[UserAuthDetailsModel], model)
class UserIdentifiersModel(base_models.BaseModel):
"""Stores the relationship between user ID and GAE ID.
Instances of this class are keyed by GAE ID.
"""
user_id = datastore_services.StringProperty(required=True, indexed=True)
@staticmethod
def get_deletion_policy():
# type: () -> base_models.DELETION_POLICY
"""Model contains data to delete corresponding to a user: id, and
user_id fields.
"""
return base_models.DELETION_POLICY.DELETE_AT_END
@staticmethod
def get_model_association_to_user():
# type: () -> base_models.MODEL_ASSOCIATION_TO_USER
"""Currently, the model holds identifiers relevant only for backend that
should not be exported.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls):
# type: () -> Dict[Text, base_models.EXPORT_POLICY]
"""Model doesn't contain any data directly corresponding to a user.
Currently, the model holds authentication details relevant only for
backend, and no exportable user data. It may contain user data in the
future.
"""
return dict(super(cls, cls).get_export_policy(), **{
'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE
})
@classmethod
def apply_deletion_policy(cls, user_id):
# type: (Text) -> None
"""Delete instances of UserIdentifiersModel for the user.
Args:
user_id: str. The ID of the user whose data should be deleted.
"""
keys = cls.query(
cls.user_id == user_id).fetch(keys_only=True)
datastore_services.delete_multi(
cast(List[datastore_services.Key], keys))
@classmethod
def has_reference_to_user_id(cls, user_id):
# type: (Text) -> bool
"""Check whether UserIdentifiersModel exists for the given user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any UserIdentifiersModel refers to the given user ID.
"""
return cls.query(cls.user_id == user_id).get(keys_only=True) is not None
@classmethod
def get_by_gae_id(cls, gae_id):
# type: (Text) -> Optional[UserIdentifiersModel]
"""Fetch an entry by GAE ID.
Args:
gae_id: str. The GAE ID.
Returns:
UserIdentifiersModel. The model with user_id field equal to user_id
argument.
"""
return cls.get_by_id(gae_id)
@classmethod
def get_by_user_id(cls, user_id):
# type: (Text) -> Optional[UserIdentifiersModel]
"""Fetch an entry by user ID.
Args:
user_id: str. The user ID.
Returns:
UserIdentifiersModel. The model with user_id field equal to user_id
argument.
"""
model = cls.query(cls.user_id == user_id).get()
return cast(Optional[UserIdentifiersModel], model)
class UserIdByFirebaseAuthIdModel(base_models.BaseModel):
"""Stores the relationship between user ID and Firebase auth ID.
Instances of this class are keyed by Firebase auth ID.
"""
user_id = datastore_services.StringProperty(required=True, indexed=True)
@staticmethod
def get_deletion_policy():
# type: () -> base_models.DELETION_POLICY
"""Model has data to delete corresponding to users: id and user_id."""
return base_models.DELETION_POLICY.DELETE_AT_END
@staticmethod
def get_model_association_to_user():
# type: () -> base_models.MODEL_ASSOCIATION_TO_USER
"""Currently, the model holds IDs relevant only for backend that should
not be exported.
"""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def get_export_policy(cls):
# type: () -> Dict[Text, base_models.EXPORT_POLICY]
"""Model doesn't contain any data directly corresponding to a user.
Currently, the model holds authentication details relevant only for
backend, and no exportable user data. It may contain user data in the
future.
"""
return dict(
super(UserIdByFirebaseAuthIdModel, cls).get_export_policy(),
**{'user_id': base_models.EXPORT_POLICY.NOT_APPLICABLE})
@classmethod
def apply_deletion_policy(cls, user_id):
# type: (Text) -> None
"""Delete instances of UserIdByFirebaseAuthIdModel for the user.
Args:
user_id: str. The ID of the user whose data should be deleted.
"""
keys = cls.query(
cls.user_id == user_id).fetch(keys_only=True)
datastore_services.delete_multi(
cast(List[datastore_services.Key], keys))
@classmethod
def has_reference_to_user_id(cls, user_id):
# type: (Text) -> bool
"""Check whether UserIdByFirebaseAuthIdModel exists for given user.
Args:
user_id: str. The ID of the user whose data should be checked.
Returns:
bool. Whether any UserIdByFirebaseAuthIdModel refers to the given
user ID.
"""
return cls.query(cls.user_id == user_id).get(keys_only=True) is not None
@classmethod
def get_by_user_id(cls, user_id):
# type: (Text) -> Optional[UserIdByFirebaseAuthIdModel]
"""Fetch an entry by user ID.
Args:
user_id: str. The user ID.
Returns:
UserIdByFirebaseAuthIdModel. The model with user_id field equal
to user_id argument.
"""
model = cls.query(cls.user_id == user_id).get()
return cast(Optional[UserIdByFirebaseAuthIdModel], model)
class FirebaseSeedModel(base_models.BaseModel):
"""Dummy model used to kick-off the DestroyFirebaseAccountsOneOffJob."""
@staticmethod
def get_deletion_policy():
# type: () -> base_models.DELETION_POLICY
"""Model should never be erased."""
return base_models.DELETION_POLICY.KEEP
@staticmethod
def get_model_association_to_user():
# type: () -> base_models.MODEL_ASSOCIATION_TO_USER
"""Model does not correspond to any users."""
return base_models.MODEL_ASSOCIATION_TO_USER.NOT_CORRESPONDING_TO_USER
@classmethod
def has_reference_to_user_id(cls, unused_user_id):
# type: (Text) -> bool
"""Model does not correspond to any users."""
return False
| 35.871429 | 96 | 0.665631 |
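A short usage sketch for the auth models defined above; it assumes the module context of that file, and the identifiers are made up for illustration.

# Hypothetical lookup by GAE auth id using the classes shown above;
# 'gae-id-123' and 'uid_abcdef' are invented identifiers.
model = UserAuthDetailsModel.get_by_auth_id(
    feconf.GAE_AUTH_PROVIDER_ID, 'gae-id-123')
if model is not None:
    # Takeout export returns {'parent_username': ...} for profile users, else {}.
    takeout_blob = UserAuthDetailsModel.export_data('uid_abcdef')
    print(takeout_blob)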
5ea63dccb629f52e1612bcec6ff1abc9fba015bc | 28,911 | py | Python | venv/Lib/site-packages/keras/optimizers.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | ["MIT"] | 3 | 2020-01-21T11:04:56.000Z | 2021-11-09T11:27:11.000Z | venv/Lib/site-packages/keras/optimizers.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | ["MIT"] | null | null | null | venv/Lib/site-packages/keras/optimizers.py | ishatserka/MachineLearningAndDataAnalysisCoursera | e82e772df2f4aec162cb34ac6127df10d14a625a | ["MIT"] | 5 | 2018-04-27T10:01:44.000Z | 2021-08-18T23:22:53.000Z |
"""Built-in optimizer classes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import copy
from six.moves import zip
from . import backend as K
from .utils.generic_utils import serialize_keras_object
from .utils.generic_utils import deserialize_keras_object
from .legacy import interfaces
if K.backend() == 'tensorflow':
import tensorflow as tf
def clip_norm(g, c, n):
if c <= 0: # if clipnorm == 0 no need to add ops to the graph
return g
# tf require using a special op to multiply IndexedSliced by scalar
if K.backend() == 'tensorflow':
condition = n >= c
then_expression = tf.scalar_mul(c / n, g)
else_expression = g
# saving the shape to avoid converting sparse tensor to dense
if isinstance(then_expression, tf.Tensor):
g_shape = copy.copy(then_expression.get_shape())
elif isinstance(then_expression, tf.IndexedSlices):
g_shape = copy.copy(then_expression.dense_shape)
if condition.dtype != tf.bool:
condition = tf.cast(condition, 'bool')
g = tf.cond(condition,
lambda: then_expression,
lambda: else_expression)
if isinstance(then_expression, tf.Tensor):
g.set_shape(g_shape)
elif isinstance(then_expression, tf.IndexedSlices):
g._dense_shape = g_shape
else:
g = K.switch(K.greater_equal(n, c), g * c / n, g)
return g
class Optimizer(object):
"""Abstract optimizer base class.
Note: this is the parent class of all optimizers, not an actual optimizer
that can be used for training models.
All Keras optimizers support the following keyword arguments:
clipnorm: float >= 0. Gradients will be clipped
when their L2 norm exceeds this value.
clipvalue: float >= 0. Gradients will be clipped
when their absolute value exceeds this value.
"""
def __init__(self, **kwargs):
allowed_kwargs = {'clipnorm', 'clipvalue'}
for k in kwargs:
if k not in allowed_kwargs:
raise TypeError('Unexpected keyword argument '
'passed to optimizer: ' + str(k))
self.__dict__.update(kwargs)
self.updates = []
self.weights = []
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
raise NotImplementedError
def get_gradients(self, loss, params):
grads = K.gradients(loss, params)
if None in grads:
raise ValueError('An operation has `None` for gradient. '
'Please make sure that all of your ops have a '
'gradient defined (i.e. are differentiable). '
'Common ops without gradient: '
'K.argmax, K.round, K.eval.')
if hasattr(self, 'clipnorm') and self.clipnorm > 0:
norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
if hasattr(self, 'clipvalue') and self.clipvalue > 0:
grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
return grads
def set_weights(self, weights):
"""Sets the weights of the optimizer, from Numpy arrays.
Should only be called after computing the gradients
(otherwise the optimizer has no weights).
# Arguments
weights: a list of Numpy arrays. The number
of arrays and their shape must match
number of the dimensions of the weights
of the optimizer (i.e. it should match the
output of `get_weights`).
# Raises
ValueError: in case of incompatible weight shapes.
"""
params = self.weights
weight_value_tuples = []
param_values = K.batch_get_value(params)
for pv, p, w in zip(param_values, params, weights):
if pv.shape != w.shape:
raise ValueError('Optimizer weight shape ' +
str(pv.shape) +
' not compatible with '
'provided weight shape ' + str(w.shape))
weight_value_tuples.append((p, w))
K.batch_set_value(weight_value_tuples)
def get_weights(self):
"""Returns the current value of the weights of the optimizer.
# Returns
A list of numpy arrays.
"""
return K.batch_get_value(self.weights)
def get_config(self):
config = {}
if hasattr(self, 'clipnorm'):
config['clipnorm'] = self.clipnorm
if hasattr(self, 'clipvalue'):
config['clipvalue'] = self.clipvalue
return config
@classmethod
def from_config(cls, config):
return cls(**config)
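# Illustrative note (not part of the original file): every optimizer below
# accepts the clipnorm/clipvalue keyword arguments handled by get_gradients()
# above. A hypothetical compile call using them might look like
#
#     model.compile(loss='mse',
#                   optimizer=SGD(lr=0.01, momentum=0.9, clipnorm=1.0))
#
# which clips each gradient to an L2 norm of at most 1.0 before the update.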
class SGD(Optimizer):
"""Stochastic gradient descent optimizer.
Includes support for momentum,
learning rate decay, and Nesterov momentum.
# Arguments
lr: float >= 0. Learning rate.
momentum: float >= 0. Parameter that accelerates SGD
in the relevant direction and dampens oscillations.
decay: float >= 0. Learning rate decay over each update.
nesterov: boolean. Whether to apply Nesterov momentum.
"""
def __init__(self, lr=0.01, momentum=0., decay=0.,
nesterov=False, **kwargs):
super(SGD, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.momentum = K.variable(momentum, name='momentum')
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.nesterov = nesterov
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
# momentum
shapes = [K.int_shape(p) for p in params]
moments = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + moments
for p, g, m in zip(params, grads, moments):
v = self.momentum * m - lr * g # velocity
self.updates.append(K.update(m, v))
if self.nesterov:
new_p = p + self.momentum * v - lr * g
else:
new_p = p + v
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'momentum': float(K.get_value(self.momentum)),
'decay': float(K.get_value(self.decay)),
'nesterov': self.nesterov}
base_config = super(SGD, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
"""RMSProp optimizer.
It is recommended to leave the parameters of this optimizer
at their default values
(except the learning rate, which can be freely tuned).
This optimizer is usually a good choice for recurrent
neural networks.
# Arguments
lr: float >= 0. Learning rate.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [rmsprop: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
"""
def __init__(self, lr=0.001, rho=0.9, epsilon=None, decay=0.,
**kwargs):
super(RMSprop, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.rho = K.variable(rho, name='rho')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
accumulators = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
self.weights = accumulators
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * K.square(g)
self.updates.append(K.update(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'rho': float(K.get_value(self.rho)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon}
base_config = super(RMSprop, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
"""Adagrad optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Learning rate.
epsilon: float >= 0. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
"""
def __init__(self, lr=0.01, epsilon=None, decay=0., **kwargs):
super(Adagrad, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a in zip(params, grads, accumulators):
new_a = a + K.square(g) # update accumulator
self.updates.append(K.update(a, new_a))
new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon}
base_config = super(Adagrad, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Optimizer):
"""Adadelta optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Learning rate.
It is recommended to leave it at the default value.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=None, decay=0.,
**kwargs):
super(Adadelta, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.lr = K.variable(lr, name='lr')
self.decay = K.variable(decay, name='decay')
self.iterations = K.variable(0, dtype='int64', name='iterations')
if epsilon is None:
epsilon = K.epsilon()
self.rho = rho
self.epsilon = epsilon
self.initial_decay = decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
shapes = [K.int_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
for p, g, a, d_a in zip(params, grads, accumulators, delta_accumulators):
# update accumulator
new_a = self.rho * a + (1. - self.rho) * K.square(g)
self.updates.append(K.update(a, new_a))
# use the new accumulator and the *old* delta_accumulator
update = g * K.sqrt(d_a + self.epsilon) / K.sqrt(new_a + self.epsilon)
new_p = p - lr * update
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
# update delta_accumulator
new_d_a = self.rho * d_a + (1 - self.rho) * K.square(update)
self.updates.append(K.update(d_a, new_d_a))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'rho': self.rho,
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon}
base_config = super(Adadelta, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adam(Optimizer):
"""Adam optimizer.
Default parameters follow those provided in the original paper.
# Arguments
lr: float >= 0. Learning rate.
beta_1: float, 0 < beta < 1. Generally close to 1.
beta_2: float, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
amsgrad: boolean. Whether to apply the AMSGrad variant of this
algorithm from the paper "On the Convergence of Adam and
Beyond".
# References
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
- [On the Convergence of Adam and Beyond](https://openreview.net/forum?id=ryQu7f-RZ)
"""
def __init__(self, lr=0.001, beta_1=0.9, beta_2=0.999,
epsilon=None, decay=0., amsgrad=False, **kwargs):
super(Adam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
self.amsgrad = amsgrad
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr * (K.sqrt(1. - K.pow(self.beta_2, t)) /
(1. - K.pow(self.beta_1, t)))
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
if self.amsgrad:
vhats = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
else:
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
if self.amsgrad:
vhat_t = K.maximum(vhat, v_t)
p_t = p - lr_t * m_t / (K.sqrt(vhat_t) + self.epsilon)
self.updates.append(K.update(vhat, vhat_t))
else:
p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon,
'amsgrad': self.amsgrad}
base_config = super(Adam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Adamax(Optimizer):
"""Adamax optimizer from Adam paper's Section 7.
It is a variant of Adam based on the infinity norm.
Default parameters follow those provided in the paper.
# Arguments
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adam - A Method for Stochastic Optimization](http://arxiv.org/abs/1412.6980v8)
"""
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
epsilon=None, decay=0., **kwargs):
super(Adamax, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.decay = K.variable(decay, name='decay')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.initial_decay = decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * K.cast(self.iterations,
K.dtype(self.decay))))
t = K.cast(self.iterations, K.floatx()) + 1
lr_t = lr / (1. - K.pow(self.beta_1, t))
shapes = [K.int_shape(p) for p in params]
# zero init of 1st moment
ms = [K.zeros(shape) for shape in shapes]
# zero init of exponentially weighted infinity norm
us = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + us
for p, g, m, u in zip(params, grads, ms, us):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
u_t = K.maximum(self.beta_2 * u, K.abs(g))
p_t = p - lr_t * m_t / (u_t + self.epsilon)
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(u, u_t))
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'decay': float(K.get_value(self.decay)),
'epsilon': self.epsilon}
base_config = super(Adamax, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class Nadam(Optimizer):
"""Nesterov Adam optimizer.
Much like Adam is essentially RMSprop with momentum,
Nadam is Adam RMSprop with Nesterov momentum.
Default parameters follow those provided in the paper.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Learning rate.
beta_1/beta_2: floats, 0 < beta < 1. Generally close to 1.
epsilon: float >= 0. Fuzz factor. If `None`, defaults to `K.epsilon()`.
# References
- [Nadam report](http://cs229.stanford.edu/proj2015/054_report.pdf)
- [On the importance of initialization and momentum in deep learning](http://www.cs.toronto.edu/~fritz/absps/momentum.pdf)
"""
def __init__(self, lr=0.002, beta_1=0.9, beta_2=0.999,
epsilon=None, schedule_decay=0.004, **kwargs):
super(Nadam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.m_schedule = K.variable(1., name='m_schedule')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
if epsilon is None:
epsilon = K.epsilon()
self.epsilon = epsilon
self.schedule_decay = schedule_decay
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
t = K.cast(self.iterations, K.floatx()) + 1
# Due to the recommendations in [2], i.e. warming momentum schedule
momentum_cache_t = self.beta_1 * (
1. - 0.5 * (K.pow(K.cast_to_floatx(0.96), t * self.schedule_decay)))
momentum_cache_t_1 = self.beta_1 * (
1. - 0.5 * (K.pow(K.cast_to_floatx(0.96), (t + 1) * self.schedule_decay)))
m_schedule_new = self.m_schedule * momentum_cache_t
m_schedule_next = self.m_schedule * momentum_cache_t * momentum_cache_t_1
self.updates.append((self.m_schedule, m_schedule_new))
shapes = [K.int_shape(p) for p in params]
ms = [K.zeros(shape) for shape in shapes]
vs = [K.zeros(shape) for shape in shapes]
self.weights = [self.iterations] + ms + vs
for p, g, m, v in zip(params, grads, ms, vs):
# the following equations given in [1]
g_prime = g / (1. - m_schedule_new)
m_t = self.beta_1 * m + (1. - self.beta_1) * g
m_t_prime = m_t / (1. - m_schedule_next)
v_t = self.beta_2 * v + (1. - self.beta_2) * K.square(g)
v_t_prime = v_t / (1. - K.pow(self.beta_2, t))
m_t_bar = (1. - momentum_cache_t) * g_prime + momentum_cache_t_1 * m_t_prime
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
p_t = p - self.lr * m_t_bar / (K.sqrt(v_t_prime) + self.epsilon)
new_p = p_t
# Apply constraints.
if getattr(p, 'constraint', None) is not None:
new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'epsilon': self.epsilon,
'schedule_decay': self.schedule_decay}
base_config = super(Nadam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class TFOptimizer(Optimizer):
"""Wrapper class for native TensorFlow optimizers.
"""
def __init__(self, optimizer):
self.optimizer = optimizer
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.optimizer.compute_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
opt_update = self.optimizer.apply_gradients(
grads, global_step=self.iterations)
self.updates.append(opt_update)
return self.updates
@property
def weights(self):
raise NotImplementedError
def get_config(self):
raise NotImplementedError
def from_config(self, config):
raise NotImplementedError
# Aliases.
sgd = SGD
rmsprop = RMSprop
adagrad = Adagrad
adadelta = Adadelta
adam = Adam
adamax = Adamax
nadam = Nadam
def serialize(optimizer):
return serialize_keras_object(optimizer)
def deserialize(config, custom_objects=None):
"""Inverse of the `serialize` function.
# Arguments
config: Optimizer configuration dictionary.
custom_objects: Optional dictionary mapping
names (strings) to custom objects
(classes and functions)
to be considered during deserialization.
# Returns
A Keras Optimizer instance.
"""
all_classes = {
'sgd': SGD,
'rmsprop': RMSprop,
'adagrad': Adagrad,
'adadelta': Adadelta,
'adam': Adam,
'adamax': Adamax,
'nadam': Nadam,
'tfoptimizer': TFOptimizer,
}
# Make deserialization case-insensitive for built-in optimizers.
if config['class_name'].lower() in all_classes:
config['class_name'] = config['class_name'].lower()
return deserialize_keras_object(config,
module_objects=all_classes,
custom_objects=custom_objects,
printable_module_name='optimizer')
def get(identifier):
"""Retrieves a Keras Optimizer instance.
# Arguments
identifier: Optimizer identifier, one of
- String: name of an optimizer
- Dictionary: configuration dictionary.
- Keras Optimizer instance (it will be returned unchanged).
- TensorFlow Optimizer instance
(it will be wrapped as a Keras Optimizer).
# Returns
A Keras Optimizer instance.
# Raises
ValueError: If `identifier` cannot be interpreted.
"""
if K.backend() == 'tensorflow':
# Wrap TF optimizer instances
if isinstance(identifier, tf.train.Optimizer):
return TFOptimizer(identifier)
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
if isinstance(identifier, Optimizer):
return identifier
else:
raise ValueError('Could not interpret optimizer identifier: ' +
str(identifier))
| 37.841623 | 158 | 0.58559 |
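A brief usage sketch for the helper functions defined at the end of the file above (assumes that module is imported):

# Resolving an optimizer from a string or a config dict, as Keras does internally.
opt = get('rmsprop')        # RMSprop instance with default parameters
cfg = serialize(opt)        # roughly {'class_name': 'RMSprop', 'config': {...}}
opt2 = deserialize(cfg)     # an equivalent RMSprop instance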
477b114092c8a2963d33722d96818d583d74d57c | 1,387 | py | Python | tracerlib/launcher.py | gokceneraslan/tracer | cd59435a237a87854cbc3e6e466349e48ca2cfe9 | ["Apache-2.0"] | 104 | 2015-09-01T17:37:14.000Z | 2021-11-15T01:32:40.000Z | tracerlib/launcher.py | gokceneraslan/tracer | cd59435a237a87854cbc3e6e466349e48ca2cfe9 | ["Apache-2.0"] | 78 | 2016-03-14T16:43:03.000Z | 2022-01-09T19:46:53.000Z | tracerlib/launcher.py | gokceneraslan/tracer | cd59435a237a87854cbc3e6e466349e48ca2cfe9 | ["Apache-2.0"] | 55 | 2016-04-14T10:12:22.000Z | 2022-03-16T22:02:12.000Z |
from __future__ import print_function
import matplotlib as mpl
mpl.use('pdf')
import argparse
import sys
from tracerlib.tasks import Assembler, Summariser, Tester, Builder
def launch():
    parser = argparse.ArgumentParser(
        description='TraCeR: reconstruction of TCR sequences from single-cell RNAseq data',
        usage=''' tracer <mode> [<args>]
              Modes are :
              - assemble: assemble TCR sequences from single-cell RNA-sequencing reads
              - summarise: summarise TCR sequences from set of cells, build clonotype networks
              - test : use a small dataset from three cells to test TraCeR installation
              - build : build resource files from gene segment sequences
              use tracer <mode> -h for specific help
              ''')
    parser.add_argument('mode', metavar="<MODE>", help='tracer mode (assemble, summarise, test or build)',
                        choices=['assemble', 'summarise', 'summarize', 'test', 'build'])
    args = parser.parse_args(sys.argv[1:2])

    task_mapper = {
        'assemble': Assembler,
        'summarise': Summariser,
        'summarize': Summariser,
        'test': Tester,
        'build': Builder
    }

    if args.mode not in task_mapper:
        print('Unrecognised mode')
        parser.print_help()
        exit(1)

    Task = task_mapper[args.mode]
    Task().run()
| 31.522727 | 106 | 0.625811 |
29d3e7b2f83368602ec09065ba1e6b23ad43461c | 914 | py | Python | diagrams/aws/general.py | fabriziofortino/diagrams | dfd8e0a52c8c4d1c3ce95dc7161c23bb2eaf0acb | ["MIT"] | 3 | 2021-03-08T21:42:22.000Z | 2021-11-14T00:58:53.000Z | diagrams/aws/general.py | fabriziofortino/diagrams | dfd8e0a52c8c4d1c3ce95dc7161c23bb2eaf0acb | ["MIT"] | 54 | 2020-12-14T08:37:05.000Z | 2022-01-14T14:51:59.000Z | diagrams/aws/general.py | fabriziofortino/diagrams | dfd8e0a52c8c4d1c3ce95dc7161c23bb2eaf0acb | ["MIT"] | 1 | 2021-12-10T20:05:06.000Z | 2021-12-10T20:05:06.000Z |
# This module is automatically generated by autogen.sh. DO NOT EDIT.
from . import _AWS
class _General(_AWS):
    _type = "general"
    _icon_dir = "resources/aws/general"

class Disk(_General):
    _icon = "disk.png"

class General(_General):
    _icon = "general.png"

class GenericDatabase(_General):
    _icon = "generic-database.png"

class GenericFirewall(_General):
    _icon = "generic-firewall.png"

class GenericOfficeBuilding(_General):
    _icon = "generic-office-building.png"

class GenericSamlToken(_General):
    _icon = "generic-saml-token.png"

class GenericSDK(_General):
    _icon = "generic-sdk.png"

class Marketplace(_General):
    _icon = "marketplace.png"

class TraditionalServer(_General):
    _icon = "traditional-server.png"

class User(_General):
    _icon = "user.png"

class Users(_General):
    _icon = "users.png"

# Aliases
OfficeBuilding = GenericOfficeBuilding
| 15.758621 | 68 | 0.708972 |
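A small usage sketch for the node classes above, using the diagrams package; it assumes diagrams and Graphviz are installed, and the labels are arbitrary.

from diagrams import Diagram
from diagrams.aws.general import User, TraditionalServer

with Diagram("Minimal example", show=False):
    User("visitor") >> TraditionalServer("legacy host")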
6f5478162adc581d2053f282128ba5ff074a484a | 4,394 | py | Python | lib/kubernetes/client/models/v1beta1_cron_job_status.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | ["Apache-2.0"] | 7 | 2019-12-21T00:14:14.000Z | 2021-03-11T14:51:37.000Z | lib/kubernetes/client/models/v1beta1_cron_job_status.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | ["Apache-2.0"] | 29 | 2019-10-09T11:16:21.000Z | 2020-06-23T09:32:09.000Z | lib/kubernetes/client/models/v1beta1_cron_job_status.py | splunkenizer/splunk_as_a_service_app | 97c4aaf927d2171bf131126cf9b70489ac75bc5a | ["Apache-2.0"] | 1 | 2021-05-07T10:13:31.000Z | 2021-05-07T10:13:31.000Z |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1CronJobStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'active': 'list[V1ObjectReference]',
'last_schedule_time': 'datetime'
}
attribute_map = {
'active': 'active',
'last_schedule_time': 'lastScheduleTime'
}
def __init__(self, active=None, last_schedule_time=None):
"""
V1beta1CronJobStatus - a model defined in Swagger
"""
self._active = None
self._last_schedule_time = None
self.discriminator = None
if active is not None:
self.active = active
if last_schedule_time is not None:
self.last_schedule_time = last_schedule_time
@property
def active(self):
"""
Gets the active of this V1beta1CronJobStatus.
A list of pointers to currently running jobs.
:return: The active of this V1beta1CronJobStatus.
:rtype: list[V1ObjectReference]
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this V1beta1CronJobStatus.
A list of pointers to currently running jobs.
:param active: The active of this V1beta1CronJobStatus.
:type: list[V1ObjectReference]
"""
self._active = active
@property
def last_schedule_time(self):
"""
Gets the last_schedule_time of this V1beta1CronJobStatus.
Information when was the last time the job was successfully scheduled.
:return: The last_schedule_time of this V1beta1CronJobStatus.
:rtype: datetime
"""
return self._last_schedule_time
@last_schedule_time.setter
def last_schedule_time(self, last_schedule_time):
"""
Sets the last_schedule_time of this V1beta1CronJobStatus.
Information when was the last time the job was successfully scheduled.
:param last_schedule_time: The last_schedule_time of this V1beta1CronJobStatus.
:type: datetime
"""
self._last_schedule_time = last_schedule_time
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1beta1CronJobStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.348387 | 106 | 0.565089 |
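A short usage sketch for the model class above; the datetime value is arbitrary and the snippet assumes the class is importable from that module.

# Constructing V1beta1CronJobStatus and round-tripping it through to_dict().
import datetime

status = V1beta1CronJobStatus(
    active=[],
    last_schedule_time=datetime.datetime(2019, 7, 1, 12, 0, 0))
print(status.to_dict())
print(status == V1beta1CronJobStatus(**status.to_dict()))  # True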
895774e30c2dd08c5b8c01f26f5e9c78d57cb1a6 | 1,623 | py | Python | preprocessing.py | Arjun-Narula/Anuvadak-Sign-language-to-text-convertor | 570667a88e64cb973740e7fca439d761319c4e9f | ["OLDAP-2.5", "OLDAP-2.4"] | null | null | null | preprocessing.py | Arjun-Narula/Anuvadak-Sign-language-to-text-convertor | 570667a88e64cb973740e7fca439d761319c4e9f | ["OLDAP-2.5", "OLDAP-2.4"] | null | null | null | preprocessing.py | Arjun-Narula/Anuvadak-Sign-language-to-text-convertor | 570667a88e64cb973740e7fca439d761319c4e9f | ["OLDAP-2.5", "OLDAP-2.4"] | null | null | null |
import cv2
import os
from image_processing import func

if not os.path.exists("temp"):
    os.makedirs("temp")
if not os.path.exists("temp/train"):
    os.makedirs("temp/train")
if not os.path.exists("temp/test"):
    os.makedirs("temp/test")

path = "data3/train"  # coloured images here
path1 = "temp"        # black and white images stored here

label = 0  # number of characters
var = 0    # total number of images
c1 = 0     # total images in train
c2 = 0     # number images in test

for (dirpath, dirnames, filenames) in os.walk(path):
    for dirname in dirnames:
        print(dirname)
        for (direcpath, direcnames, files) in os.walk(path + "/" + dirname):
            if not os.path.exists(path1 + "/train/" + dirname):
                os.makedirs(path1 + "/train/" + dirname)
            if not os.path.exists(path1 + "/test/" + dirname):
                os.makedirs(path1 + "/test/" + dirname)
            num = 0.8 * len(files)
            # num = 100000000000000000
            i = 0
            for file in files:
                var += 1
                actual_path = path + "/" + dirname + "/" + file
                actual_path1 = path1 + "/" + "train/" + dirname + "/" + file
                actual_path2 = path1 + "/" + "test/" + dirname + "/" + file
                img = cv2.imread(actual_path, 0)
                bw_image = func(actual_path)
                if i < num:
                    c1 += 1
                    cv2.imwrite(actual_path1, bw_image)
                else:
                    c2 += 1
                    cv2.imwrite(actual_path2, bw_image)
                i = i + 1
        label = label + 1

print(var)
print(c1)
print(c2)
print(label)
| 28.982143 | 69 | 0.542206 |
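The script above imports func from an image_processing module that is not part of this row; a rough sketch of what such a binarising helper might do, purely an assumption for illustration:

# Hypothetical image_processing.py: grayscale, blur, then adaptive threshold.
import cv2

def func(path):
    gray = cv2.imread(path, 0)
    blur = cv2.GaussianBlur(gray, (5, 5), 2)
    th = cv2.adaptiveThreshold(
        blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY_INV, 11, 2)
    return th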
f2eb3abfd3612f70bda9020835f646f82e172f8c | 29,739 | py | Python | cloudmesh/multipass/Provider.py | jebeckford/cloudmesh-multipass | c8bc14c5093ab599c184b2bf5f934b8a4f2b0791 | ["Apache-2.0"] | null | null | null | cloudmesh/multipass/Provider.py | jebeckford/cloudmesh-multipass | c8bc14c5093ab599c184b2bf5f934b8a4f2b0791 | ["Apache-2.0"] | null | null | null | cloudmesh/multipass/Provider.py | jebeckford/cloudmesh-multipass | c8bc14c5093ab599c184b2bf5f934b8a4f2b0791 | ["Apache-2.0"] | null | null | null |
import json
import os
from cloudmesh.abstractclass.ComputeNodeABC import ComputeNodeABC
from cloudmesh.common.DateTime import DateTime
from cloudmesh.common.Printer import Printer
from cloudmesh.common.Shell import Shell
from cloudmesh.common.console import Console
from cloudmesh.common.util import banner
from cloudmesh.common.dotdict import dotdict
# some of the banners will be removed.
# they must be used for dryrun
# we also keep it for shell, and create
# in case of create we need to look at cloudmesh-openstack as it also measures
# the time it takes to start the image and includes it into the cm dict
# all but the shell need to return a cm dict even if the original multipass does
# not return one; for example, if we delete a vm we need to return the cm dict
# with a status and introduce a status "DELETE"
"""
from the launch documentation to derive sample. This will be used to create the
manual page for cms multipass. The sample in the provider will be used for the
cms register command which is defined in cloudmesh-cloud so you can add
multipass to your yaml file.
Options:
-h, --help Display this help
-v, --verbose Increase logging verbosity, repeat up to three times for
more detail
-c, --cpus <cpus> Number of CPUs to allocate.
Minimum: 1, default: 1.
-d, --disk <disk> Disk space to allocate. Positive integers, in bytes, or
with K, M, G suffix.
Minimum: 512M, default: 5G.
-m, --mem <mem> Amount of memory to allocate. Positive integers, in
bytes, or with K, M, G suffix.
Minimum: 128M, default: 1G.
-n, --name <name> Name for the instance. If it is 'primary' (the
configured primary instance name), the user's home
directory is mounted inside the newly launched instance,
in 'Home'.
--cloud-init <file> Path to a user-data cloud-init configuration, or '-' for
stdin
Arguments:
image Optional image to launch. If omitted, then the default
Ubuntu LTS will be used.
<remote> can be either ‘release’ or ‘daily‘. If <remote>
is omitted, ‘release’ will be used.
<image> can be a partial image hash or an Ubuntu release
version, codename or alias.
<url> is a custom image URL that is in http://, https://,
or file:// format.
Gregor: interesting is the file:// and http(s):// we should try if we can
do 19.10 on
osx and windows
"""
# multipass mount
# multipass transfer
"""
delete Delete instances
exec Run a command on an instance
find Display available images to create instances from
get Get a configuration setting
help Display help about a command
info Display information about instances
launch Create and start an Ubuntu instance
list List all available instances
mount Mount a local directory in the instance
purge Purge all deleted instances permanently
recover Recover deleted instances
restart Restart instances
set Set a configuration setting
shell Open a shell on a running instance
start Start instances
stop Stop running instances
suspend Suspend running instances
transfer Transfer files between the host and instances
umount Unmount a directory from an instance
version Show version details
"""
class Provider(ComputeNodeABC):
kind = "multipass"
sample = """
cloudmesh:
cloud:
{name}:
cm:
active: true
heading: {name}
host: TBD
label: {name}
kind: multipass
version: TBD
service: compute
credentials:
username: TBD
key_path: ~/.ssh/id_rsa.pub
default:
size: m1.medium
image: 18.04
cpu: 1
disk: 5G
mem: 1GB
@ cloudinit: file:// ... or http://
"""
output = {
"vm": {
"sort_keys": ["cm.name"],
"order": ["cm.name",
"cm.cloud",
"ipv4",
"name",
"release",
"state"],
"header": ["Name",
"Cloud",
"Address",
"Name",
"Release",
"State"],
},
"image": {
"sort_keys": ["cm.name"],
"order": ["cm.name",
"os",
"release",
"remote",
"version",
"aliases"],
"header": ["Name",
"OS",
"Release",
"Remote",
"Version",
"Alias"]
},
"info": {
"sort_keys": ["cm.name"],
"order": ["name",
"state",
"images_release",
"memory",
"mounts",
"ipv4",
"release",
"image_hash"],
"header": ["Name",
"State",
"Image Release",
"Memory",
"Mounts",
"Ipv4",
"Release",
"Image Hash"]
},
}
    # please add a status here if there is one that you observe. What are all
    # the states from multipass?
STATUS = ['UNKOWN']
def __init__(self, name="multipass",
configuration="~/.cloudmesh/cloudmesh.yaml"):
"""
Initializes the multipass provider. The default parameters are read
from the configuration file that is defined in yaml format.
:param name: The name of the provider as defined in the yaml file
:param configuration: The location of the yaml configuration file
"""
#
# The following will be added later once we have identified how to
# configure multipass from cloudmesh.yaml. This requires understanding
# the get and set methods and setting defaults for sizes
#
# conf = Config(configuration)["cloudmesh"]
# super().__init__(name, conf)
#
self.cloudtype = "multipass"
self.cloud = name
# noinspection PyPep8Naming
# TODO: docstring
def Print(self, data, output=None, kind=None):
if output == "table":
if kind == "secrule":
# this is just a temporary fix, both in sec.py and
# here the secgruops and secrules should be separated
result = []
for group in data:
# for rule in group['security_group_rules']:
# rule['name'] = group['name']
result.append(group)
data = result
order = self.output[kind]['order'] # not pretty
header = self.output[kind]['header'] # not pretty
# humanize = self.output[kind]['humanize'] # not pretty
print(Printer.flatwrite(data,
sort_keys=["name"],
order=order,
header=header,
output=output,
# humanize=humanize
)
)
else:
print(Printer.write(data, output=output))
    def remove_spinner(self, text):
        # str.replace returns a new string, so the result must be reassigned
        line = text
        line = line.replace("\\08-", "")
        line = line.replace("\\08|", "")
        line = line.replace("\\08\\\\", "")
        line = line.replace("\\08/", "")
        return line
def update_dict(self, elements, kind=None):
"""
converts the dict into a list
:param elements: the list of original dicts. If elements is a single
dict a list with a single element is returned.
:param kind: for some kinds special attributes are added. This includes
key, vm, image, flavor.
:return: The list with the modified dicts
"""
if elements is None:
return None
d = []
for key, entry in elements.items():
entry['name'] = key
if "cm" not in entry:
entry['cm'] = {}
# if kind == 'ip':
# entry['name'] = entry['floating_ip_address']
entry["cm"].update({
"kind": kind,
"driver": self.cloudtype,
"cloud": self.cloud,
"name": key
})
if kind == 'vm':
entry["cm"]["updated"] = str(DateTime.now())
# if 'public_v4' in entry:
# entry['ip_public'] = entry['public_v4']
# if "created_at" in entry:
# entry["cm"]["created"] = str(entry["created_at"])
# del entry["created_at"]
# if 'status' in entry:
# entry["cm"]["status"] = str(entry["status"])
# else:
# entry["cm"]["created"] = entry["modified"]
elif kind == 'image':
entry["cm"]["created"] = entry["updated"] = str(
DateTime.now())
# elif kind == 'version':
# entry["cm"]["created"] = str(DateTime.now())
d.append(entry)
return d
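    # Editor's note (illustrative, not from the original source): for a single
    # multipass "list" entry, update_dict returns roughly the following shape;
    # all values are invented.
    #
    #   [{"name": "cloudmesh", "state": "Running",
    #     "cm": {"kind": "vm", "driver": "multipass", "cloud": "multipass",
    #            "name": "cloudmesh", "updated": "2021-01-01 00:00:00"}}]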
# IMPLEMENT, new method
def version(self):
"""
returns just the version
:return: version dict
"""
d = {
"name": self.kind,
"multipass": None,
"multipassd": None
}
result = Shell.run(f"multipass version")
if result is not None:
for line in result.splitlines():
line = line.strip().replace("multipass ", "multipass ")
key, value = line.split(" ", 1)
d[key] = value
return d
# New method to return vm status
# TODO: docstring
def _get_vm_status(self, name=None) -> dict:
dict_result = {}
result = Shell.run(f"multipass info {name} --format=json")
if f'instance "{name}" does not exist' in result:
dict_result = {
'name': name,
'status': "instance does not exist"
}
else:
result = json.loads(result)
dict_result = {
'name': name,
'status': result["info"][name]['state']
}
return dict_result
def _images(self):
"""
internal method that returns a native multipass dict of the images.
:return: dict of images in multipass format
"""
result = Shell.run("multipass find --format=json")
#
        # TODO: replace with json.loads
#
result = eval(result)['images']
return result
def images(self, **kwargs):
"""
Lists the images on the cloud
:return: dict
"""
result = self._images()
return self.update_dict(result, kind="image")
def image(self, name=None):
"""
Gets the image with a given name
:param name: The name of the image
:return: the dict of the image
"""
result = self._images()
result = [result[name]]
return self.update_dict(result, kind="image")
def _vm(self):
"""
internal method that returns the dict of all vms
:return: dict of vms in multipass format
"""
result = Shell.run("multipass list --format=json")
#
        # TODO: replace with json.loads
#
result = eval(result)['list']
result_new_dict = {}
for i in range(len(result)):
result_new_dict[result[i]["name"]] = result[i]
return result_new_dict
def vm(self, **kwargs):
"""
Lists the vms on the cloud
:return: dict
"""
result = self._vm()
return self.update_dict(result, kind="vm")
# IMPLEMENT
def start(self, name=None):
"""
start a node
:param name: the unique node name
:return: The dict representing the node
"""
banner(f"start {name}")
dict_result = {}
result = Shell.live(f"multipass start {name}")
if result['status'] > 0:
dict_result = {"name": name,
"status": "Error when starting instance"}
else:
# Get the vm status.
dict_result = self._get_vm_status(name)
return dict_result
# IMPLEMENT
def delete(self, name=None, purge=True):
"""
Deletes the names instance
:param name: The name
        :param purge: if set to true also purges the instance
:return: the dict of the deleted vm
"""
banner(f"delete {name}")
dict_result = {}
if purge:
# terminate and purge
result = Shell.live(f"multipass delete {name} --purge")
if result['status'] > 0:
dict_result = {"name": name,
"status": "Error when deleting/destroying instance"}
else:
dict_result = {"name": name,
"status": "Instance destroyed (deleted and purged)"}
else:
# terminate only
result = Shell.live(f"multipass delete {name}")
if result['status'] > 0:
dict_result = {"name": name,
"status": "Error when deleting/destroying instance"}
else:
dict_result = {"name": name, "status": "Instance deleted"}
return dict_result
# IMPLEMENT
def list(self, **kwargs):
"""
list all vm Instances
:return: an array of dicts representing the nodes
"""
# Already implemented by vm method
return self.vm()
# IMPLEMENT
def shell(self, name="cloudmesh"):
"""
log into the shell of instance
:return: an empty string
"""
banner("shell")
os.system(f"multipass shell {name}")
print("\n")
return ""
# IMPLEMENT, POSSIBLE BUG wilth live
def run(self, name="cloudmesh", command=None, executor="buffer"):
"""
executes a command in a named multipass instance
:param name: the name of the instance
:param command: the command
:param executor: one of live, buffer, os
:return: only returned when using live or buffer
        live = prints the output immediately but also buffers it and returns
it at the end
buffer = buffers the result and only returns it after the command has
executed.
os = just uses os.system and returns a "" at the end. This is good
for debugging
"""
banner(f"run {name} {command}")
# improve next line
result = ""
if executor == "buffer":
result = Shell.live(f"multipass exec {name} -- {command}")
result = self.remove_spinner(result)
elif executor == "buffer":
result = Shell.run(f"multipass exec {name} -- {command}")
result = self.remove_spinner(result)
elif executor == "os":
os.system(f"multipass exec {name} -- {command}")
print('\n')
else:
Console.error(
"run: executor must be cloudmesh or os, found: {executor}")
return result
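    # Editor's illustration (not part of the original class): typical use of run();
    # the instance name "cloudmesh" is an assumed, pre-existing instance.
    #
    #   p = Provider()
    #   output = p.run(name="cloudmesh", command="uname -a", executor="buffer")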
# IMPLEMENT, new method
def get(self, key=None):
"""
returns the variable with the given key name from multipass
:param key: the key name
:return:
"""
result = ""
if key is not None:
result = Shell.run(f"multipass get {key}")
return result
# IMPLEMENT, new method
def set(self, key=None, value=None):
"""
        sets the multipass variable with the given key name to the value
:param key: the name of the key
:param value: the value to be set
:return:
"""
result = ""
if (key is not None):
result = Shell.run(f"multipass set {key} {value}")
return result
# IMPLEMENT
def stop(self, name=None):
"""
stops the node with the given name
:param name of the instance to stop
:return: The dict representing the node including updated status
"""
banner(f"stop {name}")
dict_result = {}
result = Shell.live(f"multipass stop {name}")
if result['status'] > 0:
dict_result = {"name": name,
"status": "Error when stopping instance"}
else:
# Get the vm status.
dict_result = self._get_vm_status(name)
return dict_result
# IMPLEMENT
def info(self, name=None):
"""
gets the information of a node with a given name
:param name:
:return: The dict representing the node including updated status
"""
result = self._info()
result = [result[name]]
return self.update_dict(result, kind="info")
# IMPLEMENT
def suspend(self, name=None):
"""
suspends the node with the given name
:param name: the name of the node
:return: The dict representing the node
"""
banner(f"suspend {name}")
os.system(f"multipass suspend {name}")
# Get the vm status.
dict_result = self._get_vm_status(name)
return dict_result
# IMPLEMENT
def resume(self, name=None):
"""
resume the named node
:param name: the name of the node
:return: the dict of the node
"""
banner(f"resume {name}")
os.system(f"multipass start {name}")
# Get the vm status.
dict_result = self._get_vm_status(name)
return dict_result
# IMPLEMENT
def destroy(self, name=None):
"""
Destroys the node
:param name: the name of the node
:return: the dict of the node
"""
banner(f"destroy {name}")
return self.delete(name, purge=True)
# IMPLEMENT
    # TODO: see the sample for defaults; they are not identified
    # from kwargs and passed along
def create(self,
name=None,
image=None,
size=None,
timeout=360,
group=None,
**kwargs):
"""
creates a named node
:param group: a list of groups the vm belongs to
:param name: the name of the node
:param image: the image used
:param size: the size of the image
:param timeout: a timeout in seconds that is invoked in case the image
does not boot.
The default is set to 3 minutes.
:param kwargs: additional arguments passed along at time of boot
:return:
"""
"""
create one node
"""
banner(f"create {name} using image {image}")
arguments = dotdict(kwargs)
memory = arguments.memory
cpu = arguments.cpus
disk = arguments.disk
cloud_init = arguments.cloud_init
command = f"multipass launch --name {name}"
# Add options to create command
if cpu is not None:
command = command + f" --cpus {cpu}"
if memory is not None:
command = command + f" --mem {memory}"
if size is not None:
command = command + f" --disk {size}"
if cloud_init is not None:
command = command + f" --cloud-init {cloud_init}"
if image is not None:
command = f"{command} {image}"
# result = Shell.live(command, ) # ?
os.system(command)
# Get the vm status.
dict_result = self._get_vm_status(name)
return dict_result
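    # Editor's illustration (not part of the original class): how create() maps its
    # arguments onto "multipass launch"; every value below is an invented placeholder.
    #
    #   p = Provider()
    #   p.create(name="testvm", image="18.04", size="10G", cpus=1, memory="1GB")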
# DO NOT IMPLEMENT
def set_server_metadata(self, name, **metadata):
"""
sets the metadata for the server
        :param name: name of the vm
:param metadata: the metadata
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def get_server_metadata(self, name):
"""
gets the metadata for the server
:param name: name of the fm
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def delete_server_metadata(self, name):
"""
        deletes the metadata for the server
        :param name: name of the vm
:return:
"""
raise NotImplementedError
# IMPLEMENT
# TODO: pytest
def rename(self, name=None, destination=None):
"""
rename a node
:param destination:
:param name: the current name
:return: the dict with the new name
"""
Console.error("Renaming an instance is not yet supported by multipass")
return ""
# DO NOT IMPLEMENT
def keys(self):
"""
Lists the keys on the cloud
:return: dict
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def key_upload(self, key=None):
"""
uploads the key specified in the yaml configuration to the cloud
:param key:
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def key_delete(self, name=None):
"""
deletes the key with the given name
:param name: The name of the key
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def flavors(self, **kwargs):
"""
Lists the flavors on the cloud
:return: dict of flavors
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def flavor(self, name=None):
"""
Gets the flavor with a given name
:param name: The name of the flavor
:return: The dict of the flavor
"""
raise NotImplementedError
# IMPLEMENT, POSSIBLE BUG wilth live
def reboot(self, name=None):
"""
Reboot a list of nodes with the given names
:param name: name of instance to reboot
:return: A list of dict representing the nodes
"""
banner(f"reboot {name}")
# Shell.live(f"multipass restart {name}")
os.system(f"multipass restart {name}")
dict_result = self._get_vm_status(name)
return dict_result
# DO NOT IMPLEMENT
def attach_public_ip(self, name=None, ip=None):
"""
adds a public ip to the named vm
:param name: Name of the vm
:param ip: The ip address
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def detach_public_ip(self, name=None, ip=None):
"""
adds a public ip to the named vm
:param name: Name of the vm
:param ip: The ip address
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def delete_public_ip(self, ip=None):
"""
Deletes the ip address
:param ip: the ip address, if None than all will be deleted
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def list_public_ips(self, available=False):
"""
Lists the public ip addresses.
:param available: if True only those that are not allocated will be
returned.
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def create_public_ip(self):
"""
Creates a new public IP address to use
:return: The ip address information
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def find_available_public_ip(self):
"""
Returns a single public available ip address.
:return: The ip
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def get_public_ip(self, name=None):
"""
returns the public ip
:param name: name of the server
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def list_secgroups(self, name=None):
"""
List the named security group
:param name: The name of the group, if None all will be returned
:return:
"""
# DO NOT IMPLEMENT
def list_secgroup_rules(self, name='default'):
"""
List the named security group
:param name: The name of the group, if None all will be returned
:return:
"""
raise NotImplementedError
# DO NOT IMPLEMENT
def upload_secgroup(self, name=None):
raise NotImplementedError
# DO NOT IMPLEMENT
def add_secgroup(self, name=None, description=None):
raise NotImplementedError
# DO NOT IMPLEMENT
def add_secgroup_rule(self,
name=None, # group name
port=None,
protocol=None,
ip_range=None):
raise NotImplementedError
# DO NOT IMPLEMENT
def remove_secgroup(self, name=None):
raise NotImplementedError
# DO NOT IMPLEMENT
def add_rules_to_secgroup(self, name=None, rules=None):
raise NotImplementedError
# DO NOT IMPLEMENT
def remove_rules_from_secgroup(self, name=None, rules=None):
raise NotImplementedError
# IMPLEMENT, work with Gregor
# see cloudmesh-openstack already implemented there
def wait(self,
vm=None,
interval=None,
timeout=None):
"""
        waits until the given VM can be logged into
:param vm: name of the vm
:param interval: interval for checking
:param timeout: timeout
:return:
"""
        # repeatedly tries to run a known command such as uname -a and
        # sees if it returns without error.
        # if it reaches the timeout it fails
        # We may want to create a special static class for this as this is
        # likely the same for all clouds.
        # maybe just place it in the ABC class as an implementation so we
        # inherit it
        # same with ssh
raise NotImplementedError
return False
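    # Editor's sketch (not part of the original class): one possible implementation
    # of the polling described above. The method name, the defaults, and the use of
    # "uname -a" as the probe command are assumptions, not cloudmesh API.
    def _wait_sketch(self, vm=None, interval=5, timeout=360):
        import time
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                # run() returns the command output once the VM accepts commands
                self.run(name=vm, command="uname -a", executor="buffer")
                return True
            except Exception:
                # not reachable yet; wait and try again
                time.sleep(interval)
        return False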
# DO NOT IMPLEMENT
def console(self, vm=None):
"""
gets the output from the console
:param vm: name of the VM
:return:
"""
raise NotImplementedError
return ""
# DO NOT IMPLEMENT
def log(self, vm=None):
raise NotImplementedError
return ""
def info(self, **kwargs):
"""
Lists the info on the cloud
:return: dict
"""
result = self._info()
return self.update_dict(result, kind="info")
def _info(self):
"""
an internal method that returns the info of all instances as a dict in
multipass
:return: dict of all instances in multipass
"""
result = Shell.run("multipass info --all --format=json")
#
        # TODO: replace with json.loads
#
result = eval(result)['info']
return result
# implement
def mount(self, name="cloudmesh", source=None, destination=None):
"""
        mounts the source into the instance at the given destination
TODO: proper docstring
"""
result = ""
        if (source is not None) and (destination is not None) and (name is not None):
result = Shell.run(
f"multipass mount --name={name} {source} {destination}")
else:
Console.error("make sure to specify all attributes")
return ""
# TODO: this should return the newly mounted volume as cloudmesh json
return result
# implement
def umount(self, name="cloudmesh", path=None):
"""
Unmount a directory from an instance.
        TODO: proper docstring
:return:
"""
raise NotImplementedError
# implement
def transfer(self,
name="cloudmesh",
source=None,
destination=None,
recursive=True):
"""
copies files or entire directories into the instance
TODO: proper docstring
"""
# you may need to use glob for dirs (recursively)
# just create a glob and put it in a list.
result = ""
if None not in (source, name):
result = Shell.run(
f"multipass transfer --name={name} {source} {destination}")
else:
Console.error("make sure to specify all attributes")
return ""
# TODO: this should return the newly mounted volume as cloudmesh json
return result
if __name__ == "__main__":
# excellent-titmouse is multipass instance name
p = Provider() # name="cloudmesh"
#p.vm()
#p.start("testvm")
#p.stop("testvm")
#p.vm()
#p.run("uname -r")
#p.images()
#p.delete("testvm")
#p.vm()
#p.list()
| 28.761122 | 84 | 0.53065 |
0c27f62b3bbe87d9acbd81a7c58d2247f5966795 | 3,647 | py | Python | manage.py | BolajiOlajide/rmw-bot | 6c7e37f2f977af8fe3c90b59c1242fc3180aea88 | ["MIT"] | 2 | 2019-11-24T21:05:00.000Z | 2021-02-15T12:32:46.000Z | manage.py | BolajiOlajide/rmw-bot | 6c7e37f2f977af8fe3c90b59c1242fc3180aea88 | ["MIT"] | 1 | 2019-12-28T07:25:12.000Z | 2019-12-28T07:25:12.000Z | manage.py | BolajiOlajide/rmw-bot | 6c7e37f2f977af8fe3c90b59c1242fc3180aea88 | ["MIT"] | null | null | null |
import json
from threading import Thread
import requests
from flask import jsonify, request
from flask_migrate import Migrate, MigrateCommand
from flask_script import Manager
from app import create_app
from app.actions.bot_actions import BotActions
from app.repositories.user_repo import UserRepo
from app.utils import allowed_commands, db, slackhelper
from app.utils.handle_bot_actions import handle_bot_actions
from config import get_env
app = create_app(get_env("APP_ENV"))
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command("db", MigrateCommand)
help_message = """The following commands are available on the RideMyWay platform
>>>
:heavy_plus_sign: *Add a ride* `/rmw add-ride` _This is used to add a ride to the system.
This option is only available to drivers._
:bow_and_arrow: *Show Rides* `/rmw show-rides` _This is used to view all recent rides in the system._
:information_source: *Get Ride Info* `/rmw ride-info <ride_id>` _Get details of a ride_
:juggling: *Join a Ride* - `/rmw join-ride <ride_id>` _Join a ride using the ride ID_
:walking: *Leave a ride* - `/rmw leave-ride <ride_id>` _Join a ride using the ride ID_
:mailbox_closed: *Cancel a ride* - `/rmw cancel-ride <ride_id>` _Cancel a ride using the ride ID_
:speaking_head_in_silhouette: *Help* - `/rmw help` _Display RMW help menu_
"""
@app.route("/", methods=["GET", "POST"])
def home():
msg = {"status": "success"}
response = jsonify(msg)
response.status_code = 200
return response
@app.route("/bot", methods=["POST", "GET", "PATCH"])
def bot():
command_text = request.data.get("text").split(" ")
request_slack_id = request.data.get("user_id")
webhook_url = request.data.get("response_url")
message_trigger = request.data.get("trigger_id")
if command_text[0] == "help" or (not command_text[0]):
response_body = {"text": help_message}
response = jsonify(response_body)
response.status_code = 200
return response
if command_text[0] not in allowed_commands:
response_body = {"text": "Invalid Command. Use the `/rmw help` to get help."}
response = jsonify(response_body)
response.status_code = 200
return response
rmw_thread = Thread(
target=handle_bot_actions,
args=(app, message_trigger, webhook_url, request_slack_id, command_text),
)
rmw_thread.start()
return "", 200
@app.route("/interactive", methods=["POST", "GET"])
def interactive():
request_payload = json.loads(request.data.get("payload"))
webhook_url = request_payload["response_url"]
slack_user_info = slackhelper.user_info(request_payload["user"]["id"])
user_data = slack_user_info["user"]
current_user = UserRepo.find_or_create(
by="slack_uid", value=request_payload["user"]["id"], user_data=user_data
)
bot_actions = BotActions(current_user=current_user)
check_for_error = True
if request_payload["type"] == "dialog_submission":
slack_data = bot_actions.add_ride(
origin=request_payload["submission"]["origin"],
destination=request_payload["submission"]["destination"],
take_off=request_payload["submission"]["take_off"],
max_seats=request_payload["submission"]["max_seats"],
)
elif request_payload["type"] == "dialog_cancellation":
slack_data = {
"text": "We hope you change your mind and help others share a ride"
}
check_for_error = False
slackhelper.send_delayed_msg(webhook_url, slack_data, check_for_error)
return "", 200
if __name__ == "__main__":
manager.run()
| 33.154545 | 101 | 0.700027 |
5d4ad9eb163eaa08df5be52e38c55dbde041fd21 | 4,652 | py | Python | ppci/lang/c/utils.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | null | null | null | ppci/lang/c/utils.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | null | null | null | ppci/lang/c/utils.py | rakati/ppci-mirror | 8f5b0282fd1122d7c389b39c86fcf5d9352b7bb2 | ["BSD-2-Clause"] | 1 | 2021-11-23T14:23:04.000Z | 2021-11-23T14:23:04.000Z |
import re
from .nodes.visitor import Visitor
def required_padding(address, alignment):
""" Return how many padding bytes are needed to align address """
rest = address % alignment
if rest:
# We need padding bytes:
return alignment - rest
return 0
def print_ast(ast, file=None):
""" Display an abstract syntax tree.
"""
CAstPrinter(file=file).print(ast)
class CAstPrinter(Visitor):
""" Print AST of a C program """
def __init__(self, file=None):
self.indent = 0
self.file = file
def print(self, node):
self.visit(node)
def _print(self, node):
print(" " * self.indent + str(node), file=self.file)
def visit(self, node):
self._print(node)
self.indent += 1
super().visit(node)
self.indent -= 1
class LineInfo:
""" Line information indicating where the following content comes from.
Flags can be given.
1: start of new file
2: returning to a file after an include
3: the following comes from a system header file
4: The following should be wrapped inside extern "C" implicitly
"""
FLAG_START_OF_NEW_FILE = 1
FLAG_RETURN_FROM_INCLUDE = 2
def __init__(self, line, filename, flags=()):
self.line = line
self.filename = filename
self.flags = flags
def __str__(self):
if self.flags:
flags = " " + " ".join(map(str, self.flags))
else:
flags = ""
return '# {} "{}"{}'.format(self.line, self.filename, flags)
def cnum(txt: str):
""" Convert C number to integer """
assert isinstance(txt, str)
# Lower tha casing:
num = txt.lower()
if "." in txt:
# Floating point
type_specifiers = ["double"]
return float(num), type_specifiers
else:
# Integer:
# Determine base:
if num.startswith("0x"):
num = num[2:]
base = 16
elif num.startswith("0b"):
num = num[2:]
base = 2
elif num.startswith("0"):
base = 8
else:
base = 10
# Determine suffix:
type_specifiers = []
while num.endswith(("l", "u")):
if num.endswith("u"):
num = num[:-1]
type_specifiers.append("unsigned")
elif num.endswith("l"):
num = num[:-1]
type_specifiers.append("long")
else:
raise NotImplementedError()
if not type_specifiers:
type_specifiers.append("int")
# Take the integer:
return int(num, base), type_specifiers
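# Editor's note (illustrative, not from the original source): expected behaviour
# of cnum on a few literals, derived from the code above.
#
#   cnum("42")   -> (42, ['int'])
#   cnum("0x10") -> (16, ['int'])
#   cnum("1.5")  -> (1.5, ['double'])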
def replace_escape_codes(txt: str):
""" Replace escape codes inside the given text """
prog = re.compile(
r"(\\[0-7]{1,3})|(\\x[0-9a-fA-F]+)|"
r'(\\[\'"?\\abfnrtve])|(\\u[0-9a-fA-F]{4})|(\\U[0-9a-fA-F]{8})'
)
pos = 0
endpos = len(txt)
parts = []
while pos != endpos:
# Find next match:
mo = prog.search(txt, pos)
if mo:
# We have an escape code:
if mo.start() > pos:
parts.append(txt[pos : mo.start()])
# print(mo.groups())
octal, hx, ch, uni1, uni2 = mo.groups()
if octal:
char = chr(int(octal[1:], 8))
elif hx:
char = chr(int(hx[2:], 16))
elif ch:
mp = {
"a": "\a",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
"v": "\v",
"e": "\x1B", # Non-standard escape character
"\\": "\\",
'"': '"',
"'": "'",
"?": "?",
}
char = mp[ch[1:]]
elif uni1:
char = chr(int(uni1[2:], 16))
elif uni2:
char = chr(int(uni2[2:], 16))
else: # pragma: no cover
raise RuntimeError()
parts.append(char)
pos = mo.end()
else:
# No escape code found:
parts.append(txt[pos:])
pos = endpos
return "".join(parts)
def charval(txt: str):
""" Get the character value of a char literal """
# Wide char?
if txt.startswith("L"):
txt = txt[1:]
# Strip out ' and '
assert txt[0] == "'"
assert txt[-1] == "'"
txt = txt[1:-1]
assert len(txt) == 1
# TODO: implement wide characters!
return ord(txt), ["char"]
| 26.282486 | 75 | 0.474205 |
4fe32fa9065e7329a7d129c7926cec156ecf6bac | 3,223 | py | Python | examples/unsupervised/lsh.py | rueckstiess/pybrain | 8fc950d700aaf9d5012911d53714afb4b18225c3 | ["BSD-3-Clause"] | 3 | 2015-03-21T21:42:28.000Z | 2018-07-12T04:21:32.000Z | examples/unsupervised/lsh.py | bayerj/pybrain | cfef28152bd60cedfdae5390c599d4fe4d2ec095 | ["BSD-3-Clause"] | null | null | null | examples/unsupervised/lsh.py | bayerj/pybrain | cfef28152bd60cedfdae5390c599d4fe4d2ec095 | ["BSD-3-Clause"] | null | null | null |
#! /usr/bin/env python2.5
from __future__ import division
__author__ = 'Justin Bayer, bayer.justin@googlemail.com'
import logging
from random import shuffle
from pylab import show, plot, clf
from pybrain.supervised.knn.lsh.nearoptimal import MultiDimHash
from scipy import random, array, dot, zeros
from scipy.linalg import orth
def randomRotation(dim):
"""Return a random rotation matrix of rank dim."""
return orth(random.random((dim, dim)))
def makeData(amount = 10000):
"""Return 2D dataset of points in (0, 1) where points in a circle of
radius .4 around the center are blue and all the others are red."""
center = array([0.5, 0.5])
def makePoint():
"""Return a random point and its satellite information.
Satellite is 'blue' if point is in the circle, else 'red'."""
point = random.random((2,)) * 10
vectorLength = lambda x: dot(x.T, x)
return point, 'blue' if vectorLength(point - center) < 25 else 'red'
return [makePoint() for _ in xrange(amount)]
if __name__ == '__main__':
# Amount of dimensions to test with
dimensions = 3
loglevel = logging.DEBUG
logging.basicConfig(level=loglevel,
format='%(asctime)s %(levelname)s %(message)s')
logging.info("Making dataset...")
data = makeData(1000)
logging.info("Making random projection...")
proj = zeros((2, dimensions))
proj[0, 0] = 1
proj[1, 1] = 1
randRot = randomRotation(dimensions)
proj = dot(proj, randRot)
logging.info("Initializing data structure...")
m = MultiDimHash(dimensions, 2, 0.80)
logging.info("Putting data into hash...")
for point, satellite in data:
point = dot(point, proj)
m.insert(point, satellite)
logging.info("Retrieve nearest neighbours...")
result = []
width, height = 2**5, 2**5
grid = (array([i / width * 10, j / height * 10])
for i in xrange(width)
for j in xrange(height))
projected_grid = [(p, dot(p, proj)) for p in grid]
# Just to fake random access
shuffle(projected_grid)
for p, pp in projected_grid:
nns = m.knn(pp, 1)
if nns == []:
continue
_, color = nns[0]
result.append((p, color))
# Visualize it
visualize = True
if visualize:
clf()
result = [((x, y), color)
for (x, y), color in result
if color is not None]
xs_red = [x for ((x, y), color) in result if color == 'red']
ys_red = [y for ((x, y), color) in result if color == 'red']
xs_blue = [x for ((x, y), color) in result if color == 'blue']
ys_blue = [y for ((x, y), color) in result if color == 'blue']
plot(xs_red, ys_red, 'ro')
plot(xs_blue, ys_blue, 'bo')
show()
ballsizes = (len(ball) for ball in m.balls.itervalues())
logging.info("Sizes of the balls: " + " ".join(str(i) for i in ballsizes))
logging.info("Finished")
| 30.990385 | 79 | 0.561278 |
13dc02451d64444e80dcbdb8660b261c3bd0499e | 2,554 | py | Python | src/richie/plugins/lti_consumer/api.py | kbneedscoffee/richie | b1961a9404eb83212e5845abffd645f86b22350e | ["MIT"] | null | null | null | src/richie/plugins/lti_consumer/api.py | kbneedscoffee/richie | b1961a9404eb83212e5845abffd645f86b22350e | ["MIT"] | null | null | null | src/richie/plugins/lti_consumer/api.py | kbneedscoffee/richie | b1961a9404eb83212e5845abffd645f86b22350e | ["MIT"] | null | null | null |
"""Declare API endpoints for LTI Consumer Plugin"""
from django.core.cache import caches
from django.core.cache.backends.base import InvalidCacheBackendError
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import translation
from rest_framework import viewsets
from rest_framework.decorators import action
from . import models
class LTIConsumerViewsSet(viewsets.GenericViewSet):
"""Viewset for LTI Consumer Plugin"""
@action(methods=["get"], detail=True, url_path="context")
# pylint: disable=no-self-use,unused-argument,invalid-name
def get_context(self, request, version=None, pk=None):
"""Process context data for the LTI Consumer Plugin.
Parameters:
- pk: the primary key of the LTI Consumer plugin to get context
Returns:
- response (JSON Object):
- url: the LTI resource url
            - content_parameters: all generated parameters related to the lti provider
- is_automatic_resizing: boolean to control automatic resizing
"""
language = translation.get_language()
cache_key = f"lti_consumer_plugin__pk_{pk}_{language}"
edit = request.toolbar and request.toolbar.edit_mode_active
# Send response from cache only if edition is off
if edit:
cache = None
else:
try:
cache = caches["memory_cache"]
except InvalidCacheBackendError:
cache = None
else:
response = cache.get(cache_key)
if response is not None:
return JsonResponse(response)
plugin = get_object_or_404(models.LTIConsumer, pk=pk)
# If edition is on, check permissions to make sure it is also allowed
# before granting the instructor role
edit = edit and plugin.placeholder.has_change_plugin_permission(
request.user, plugin
)
response = {
"is_automatic_resizing": plugin.lti_provider.get(
"is_automatic_resizing", True
)
if plugin.lti_provider_id
else plugin.is_automatic_resizing,
"content_parameters": plugin.get_content_parameters(edit=edit),
"url": plugin.url,
}
if cache is not None:
# Cache the response for 5 minutes,
# lti oauth credentials are stale after this delay.
cache.set(cache_key, response, 5 * 60)
return JsonResponse(response)
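# Editor's note (illustrative, not part of the original module): the JSON body
# returned above has roughly this shape; all values are invented examples.
#
#   {"is_automatic_resizing": true,
#    "content_parameters": {"lti_message_type": "basic-lti-launch-request", "...": "..."},
#    "url": "https://lti.example.com/lti/launch"}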
| 34.986301 | 85 | 0.645262 |
4fd25b8f90890a1fac22c92a73fde972c0f86ce5 | 4,689 | py | Python | test/unit/test_context.py | pushpay/dbt | 8b96de893af692bd77fe9eb9a8104317be7b5413 | ["Apache-2.0"] | null | null | null | test/unit/test_context.py | pushpay/dbt | 8b96de893af692bd77fe9eb9a8104317be7b5413 | ["Apache-2.0"] | null | null | null | test/unit/test_context.py | pushpay/dbt | 8b96de893af692bd77fe9eb9a8104317be7b5413 | ["Apache-2.0"] | null | null | null |
import mock
import unittest
from dbt.contracts.graph.parsed import ParsedNode
from dbt.context import parser, runtime
import dbt.exceptions
from .mock_adapter import adapter_factory
class TestVar(unittest.TestCase):
def setUp(self):
self.model = ParsedNode(
alias='model_one',
name='model_one',
database='dbt',
schema='analytics',
resource_type='model',
unique_id='model.root.model_one',
fqn=['root', 'model_one'],
empty=False,
package_name='root',
original_file_path='model_one.sql',
root_path='/usr/src/app',
refs=[],
sources=[],
depends_on={
'nodes': [],
'macros': []
},
config={
'enabled': True,
'materialized': 'view',
'persist_docs': {},
'post-hook': [],
'pre-hook': [],
'vars': {},
'quoting': {},
'column_types': {},
'tags': [],
},
tags=[],
path='model_one.sql',
raw_sql='',
description='',
columns={}
)
self.context = mock.MagicMock()
def test_var_default_something(self):
var = runtime.Var(self.model, self.context, overrides={'foo': 'baz'})
self.assertEqual(var('foo'), 'baz')
self.assertEqual(var('foo', 'bar'), 'baz')
def test_var_default_none(self):
var = runtime.Var(self.model, self.context, overrides={'foo': None})
self.assertEqual(var('foo'), None)
self.assertEqual(var('foo', 'bar'), None)
def test_var_not_defined(self):
var = runtime.Var(self.model, self.context, overrides={})
self.assertEqual(var('foo', 'bar'), 'bar')
with self.assertRaises(dbt.exceptions.CompilationException):
var('foo')
def test_parser_var_default_something(self):
var = parser.Var(self.model, self.context, overrides={'foo': 'baz'})
self.assertEqual(var('foo'), 'baz')
self.assertEqual(var('foo', 'bar'), 'baz')
def test_parser_var_default_none(self):
var = parser.Var(self.model, self.context, overrides={'foo': None})
self.assertEqual(var('foo'), None)
self.assertEqual(var('foo', 'bar'), None)
def test_parser_var_not_defined(self):
# at parse-time, we should not raise if we encounter a missing var
# that way disabled models don't get parse errors
var = parser.Var(self.model, self.context, overrides={})
self.assertEqual(var('foo', 'bar'), 'bar')
self.assertEqual(var('foo'), None)
class TestParseWrapper(unittest.TestCase):
def setUp(self):
self.mock_config = mock.MagicMock()
adapter_class = adapter_factory()
self.mock_adapter = adapter_class(self.mock_config)
self.wrapper = parser.DatabaseWrapper(self.mock_adapter)
self.responder = self.mock_adapter.responder
def test_unwrapped_method(self):
self.assertEqual(self.wrapper.quote('test_value'), '"test_value"')
self.responder.quote.assert_called_once_with('test_value')
def test_wrapped_method(self):
found = self.wrapper.get_relation('database', 'schema', 'identifier')
self.assertEqual(found, None)
self.responder.get_relation.assert_not_called()
class TestRuntimeWrapper(unittest.TestCase):
def setUp(self):
self.mock_config = mock.MagicMock()
self.mock_config.quoting = {'database': True, 'schema': True, 'identifier': True}
adapter_class = adapter_factory()
self.mock_adapter = adapter_class(self.mock_config)
self.wrapper = runtime.DatabaseWrapper(self.mock_adapter)
self.responder = self.mock_adapter.responder
def test_unwrapped_method(self):
# the 'quote' method isn't wrapped, we should get our expected inputs
self.assertEqual(self.wrapper.quote('test_value'), '"test_value"')
self.responder.quote.assert_called_once_with('test_value')
def test_wrapped_method(self):
rel = mock.MagicMock()
rel.matches.return_value = True
self.responder.list_relations_without_caching.return_value = [rel]
found = self.wrapper.get_relation('database', 'schema', 'identifier')
self.assertEqual(found, rel)
# it gets called with an information schema relation as the first arg,
# which is hard to mock.
self.responder.list_relations_without_caching.assert_called_once_with(
mock.ANY, 'schema'
)
| 35.793893 | 89 | 0.605673 |
4d7ee1ea5082795149f26c05c94649ffa81815c4 | 1,556 | py | Python | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/CreateMonitoringAgentProcessRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/CreateMonitoringAgentProcessRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-cms/aliyunsdkcms/request/v20190101/CreateMonitoringAgentProcessRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | ["Apache-2.0"] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateMonitoringAgentProcessRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'CreateMonitoringAgentProcess','cms')
self.set_method('POST')
def get_ProcessName(self):
return self.get_query_params().get('ProcessName')
def set_ProcessName(self,ProcessName):
self.add_query_param('ProcessName',ProcessName)
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_ProcessUser(self):
return self.get_query_params().get('ProcessUser')
def set_ProcessUser(self,ProcessUser):
        self.add_query_param('ProcessUser',ProcessUser)
| 35.363636 | 87 | 0.768638 |
67981d9835e269669e9986930d7fbcb92c76a5bc | 22,255 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_express_route_cross_connection_peerings_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_express_route_cross_connection_peerings_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_express_route_cross_connection_peerings_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | ["MIT"] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionPeeringsOperations:
"""ExpressRouteCrossConnectionPeeringsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
cross_connection_name: str,
**kwargs
) -> AsyncIterable["_models.ExpressRouteCrossConnectionPeeringList"]:
"""Gets all peerings in a specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ExpressRouteCrossConnectionPeeringList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeeringList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeeringList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeeringList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified peering from the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
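    # Editor's illustration (not part of the generated client): typical use of the
    # poller returned by begin_delete(); the resource names are placeholders.
    #
    #   poller = await client.express_route_cross_connection_peerings.begin_delete(
    #       "my-resource-group", "my-cross-connection", "AzurePrivatePeering")
    #   await poller.result()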
async def get(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
**kwargs
) -> "_models.ExpressRouteCrossConnectionPeering":
"""Gets the specified peering for the ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ExpressRouteCrossConnectionPeering, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeering
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> "_models.ExpressRouteCrossConnectionPeering":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(peering_parameters, 'ExpressRouteCrossConnectionPeering')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
peering_parameters: "_models.ExpressRouteCrossConnectionPeering",
**kwargs
) -> AsyncLROPoller["_models.ExpressRouteCrossConnectionPeering"]:
"""Creates or updates a peering in the specified ExpressRouteCrossConnection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cross_connection_name: The name of the ExpressRouteCrossConnection.
:type cross_connection_name: str
:param peering_name: The name of the peering.
:type peering_name: str
:param peering_parameters: Parameters supplied to the create or update
ExpressRouteCrossConnection peering operation.
:type peering_parameters: ~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeering
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionPeering or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_07_01.models.ExpressRouteCrossConnectionPeering]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionPeering"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
cross_connection_name=cross_connection_name,
peering_name=peering_name,
peering_parameters=peering_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ExpressRouteCrossConnectionPeering', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}'} # type: ignore
| 51.755814 | 236 | 0.683981 |
71f92b6f4ffce77cab1bfbd4a2d6ef19216c1fe6 | 940 | py | Python | xclib/ejabberd_io.py | gibberfishinc/xmpp-cloud-auth | 9e5856f49ba1fb98fd0d5ac34d75b5ca8f9a5e2a | [
"MIT"
] | null | null | null | xclib/ejabberd_io.py | gibberfishinc/xmpp-cloud-auth | 9e5856f49ba1fb98fd0d5ac34d75b5ca8f9a5e2a | [
"MIT"
] | null | null | null | xclib/ejabberd_io.py | gibberfishinc/xmpp-cloud-auth | 9e5856f49ba1fb98fd0d5ac34d75b5ca8f9a5e2a | [
"MIT"
] | null | null | null | import sys
import logging
from struct import pack, unpack
from xclib.utf8 import unutf8
class ejabberd_io:
@classmethod
def read_request(cls):
length_field = sys.stdin.buffer.read(2)
while len(length_field) == 2:
(size,) = unpack('>H', length_field)
if size == 0:
logging.info('command length 0, treating as logical EOF')
return
cmd = sys.stdin.buffer.read(size)
if len(cmd) != size:
logging.warn('premature EOF while reading cmd: %d != %d' % (len(cmd), size))
return
x = unutf8(cmd).split(':', 3)
yield tuple(x)
length_field = sys.stdin.buffer.read(2)
@classmethod
def write_response(cls, flag):
answer = 0
if flag:
answer = 1
token = pack('>HH', 2, answer)
sys.stdout.buffer.write(token)
sys.stdout.flush()
| 29.375 | 91 | 0.546809 |
132ef5d6977effe417d867e5e45e74531628d4b8 | 8,020 | py | Python | notebooks/initial-data-analysis.py | LSDE-Flickr-ML-Classification/data-anaylsis | a8ae2c6d45274d57a9d88e3c0dc48b7715d2dfcd | [
"MIT"
] | null | null | null | notebooks/initial-data-analysis.py | LSDE-Flickr-ML-Classification/data-anaylsis | a8ae2c6d45274d57a9d88e3c0dc48b7715d2dfcd | [
"MIT"
] | null | null | null | notebooks/initial-data-analysis.py | LSDE-Flickr-ML-Classification/data-anaylsis | a8ae2c6d45274d57a9d88e3c0dc48b7715d2dfcd | [
"MIT"
] | null | null | null | # Databricks notebook source
# MAGIC %run /group07/shared
# COMMAND ----------
###
# Samples the Flickr dataset with the default seed "lsde" encoded as ASCII codes (108 115 100 101)
###
def sample_flickr_dataset(full_sample = False, prefix = "", seed = 108115100101):
files_list = dbutils.fs.ls("/mnt/data/flickr")
DF_SAMPLE_SIZE_FRAC = 0.0001
BUCKET_COUNT = (len(files_list) if full_sample else 1)
df_sampled_buckets = spark.createDataFrame([], get_csv_data_scheme())
print("file | sample size")
for i, file in enumerate(files_list[0:BUCKET_COUNT]):
df_bucket = spark.read.format("CSV").option("delimiter", "\t").schema(get_csv_data_scheme()).load(file.path[5:]).sample(True, DF_SAMPLE_SIZE_FRAC, seed=seed)
df_bucket.write.format("parquet").save("/mnt/group07/{0}flickr_bucket_{1}.parquet".format(prefix, i))
df_sampled_buckets = df_sampled_buckets.union(df_bucket)
print("{0} | {1}".format(file.name, df_bucket.count()))
df_sampled_buckets.write.format("parquet").save("/mnt/group07/{0}flickr.parquet".format(prefix))
return df_sampled_buckets
# COMMAND ----------
def col_basic_analysis(col_name, df):
df_col = df.select(col_name)
col_total_cnt = df_col.count()
null_values_cnt = df_col.filter(df_col[col_name].isNull()).count()
return [col_name, col_total_cnt, null_values_cnt]
# COMMAND ----------
from urllib.parse import urlparse
def is_flickr_image_download_url(url):
try:
result = urlparse(url)
if (result.scheme != "http") :
return False
if (not result.path.endswith(".jpg")):
return False
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
# COMMAND ----------
import requests
from timeit import default_timer as timer
from datetime import timedelta
from PIL import Image
import io
def download_and_save_image(row):
try:
# return (row.id, None, None, None, None)
# initiate download:
start_download = timer()
req = requests.get(row.photo_video_download_url)
end_download = timer()
    download_in_seconds = timedelta(seconds=end_download-start_download).total_seconds()
status_code = req.status_code
# process the result
if req.status_code == 200:
image_in_bytes = len(req.content)
image_file_path = "/dbfs/mnt/group07/images_stats/{0}.jpg".format(row.id)
image = Image.open(io.BytesIO(req.content))
image_height = image.size[1]
image_width = image.size[0]
f = open(image_file_path,'wb')
f.write(req.content)
f.close()
else:
image_in_bytes = None
image_file_path = None
image_height = None
image_width = None
except Exception as e:
return (row.id, None, None, None, None, None, str(e))
return (row.id, status_code, download_in_seconds, image_in_bytes, image_width, image_height, image_file_path)
# COMMAND ----------
# Triggers the flickr download based on the delta of images
def batch_download_images(df_download_links, df_download_hist):
batch_map = df_download_links.join(df_download_hist, "id", "leftanti").rdd.map(download_and_save_image)
return spark.createDataFrame(batch_map, get_download_history_schema())
# Returns a download history dataframe (either from disk or new one)
def load_download_history():
# open a new download history
if "download_history.parquet/" in [fi.name for fi in dbutils.fs.ls("/mnt/group07")]:
return spark.read.parquet("/mnt/group07/download_history.parquet")
else:
return spark.createDataFrame([], get_download_history_schema())
# COMMAND ----------
# Sample dataset (set to True if full dataset - 10gb of data!) and write to parquet
# df_sampled = sample_flickr_dataset(False, "single/")
# COMMAND ----------
df_raw_flickr = spark.read.parquet("/mnt/group07/full/flickr.parquet")
print(df_raw_flickr.count())
# COMMAND ----------
# Basic Col Analysis
basic_analysis = []
# for col in df_raw_flickr.columns:
# basic_analysis.append(col_basic_analysis(col, df_raw_flickr))
# df_col_analysis = spark.createDataFrame(basic_analysis, col_analysis_schema)
# COMMAND ----------
from pyspark.sql.functions import udf
valid_img_download_url_udf = udf(lambda str_url: str_url if is_flickr_image_download_url(str_url) else None, StringType())
df_flickr = df_raw_flickr \
.withColumn("id", df_raw_flickr["id"].cast(LongType())) \
.withColumn("date_taken", df_raw_flickr["date_taken"].cast(DateType())) \
.withColumn("date_uploaded", df_raw_flickr["date_uploaded"].cast(IntegerType())) \
.withColumn("longitude", df_raw_flickr["longitude"].cast(IntegerType())) \
.withColumn("latitude", df_raw_flickr["latitude"].cast(IntegerType())) \
.withColumn("accuracy", df_raw_flickr["accuracy"].cast(IntegerType())) \
.withColumn("photo_video_download_url", valid_img_download_url_udf(df_raw_flickr["photo_video_download_url"])) \
.withColumn("photo_video_server_id", df_raw_flickr["photo_video_server_id"].cast(IntegerType())) \
.withColumn("photo_video_farm_id", df_raw_flickr["photo_video_farm_id"].cast(IntegerType())) \
.withColumn("photo_video_marker", df_raw_flickr["photo_video_marker"].cast(IntegerType()))
#cast_analysis = []
#for col in df_flickr.columns:
# cast_analysis.append(col_basic_analysis(col, df_flickr))
#df_cast_col_analysis = spark.createDataFrame(cast_analysis, col_analysis_schema)
# check if anaylsis output differs:
#df_diff = df_cast_col_analysis.subtract(df_col_analysis)
#df_diff.show(5, False)
#df_cast_col_analysis.show(40, False)
#df_col_analysis.show(40, False)
df_flickr.printSchema()
#df_raw_flickr.select("photo_video_download_url", "photo_video_marker").where(df_raw_flickr["photo_video_marker"] == 1).show(10000, False)
#df_flickr.select("photo_video_download_url", "photo_video_marker").filter(df_flickr["photo_video_download_url"].isNull()).show(10000, False)
# TODO columns:
# .withColumn("photo_video_page_url", valid_url_udf(df_raw_flickr["photo_video_page_url"])) \
# TODO:
# data violations defined by the schema
# date fields: date_taken, date_uploaded
# url fields: photo/video page url / photo/video download url, license url
# is data plausible:
# -> is a picture really a picture, a video really a video?
# check if picture exists - only request head
# COMMAND ----------
df_id_download_link = df_flickr.where(df_flickr.photo_video_marker == 0).select(df_flickr.id, df_flickr.photo_video_download_url)
# Create required dirs
dbutils.fs.mkdirs("/mnt/group07/images_stats/")
# Load the download history (to avoid downloading unnecessary stuff)
df_download_history = load_download_history()
# Execute the Batch download images function, that pulls the delta history from flickr
df_downloads = batch_download_images(df_id_download_link, df_download_history)
df_download_history = df_download_history.union(df_downloads)
# write the latest download history
df_download_history.write.mode("overwrite").format("parquet").save("/mnt/group07/download_history.parquet")
# COMMAND ----------
df_download_history = spark.read.parquet("/mnt/group07/download_history.parquet")
df_download_history.show(100, False)
# COMMAND ----------
df = df_download_history.select("status_code").groupBy("status_code").count()
df.show()
# COMMAND ----------
df_download_history.filter(df_download_history.status_code == 200).show(10000, False)
# COMMAND ----------
# Get folder count
f1 = sorted(list(map( lambda x: x.name[: -4], dbutils.fs.ls("/mnt/group07/images/"))))
f2 = sorted(list(map( lambda x: x.name[: -4], dbutils.fs.ls("/mnt/group07/images_stats/"))))
# f2 = dbutils.fs.ls("/mnt/group07/images_stats/")
print(f1[0])
print(f2[0])
f_diff = set(f1) - set(f2)
df_diff = df_flickr.where(df_flickr.id.isin(f_diff))
df_diff.show(100, False)
#print(len(f2))
#print(len(f1))
print(len(f_diff))
#for fd in f_diff:
# print(fd)
# COMMAND ----------
files = dbutils.fs.ls("/mnt/data/flickr")
file = files[0]
print(file )
# COMMAND ----------
| 34.718615 | 161 | 0.723566 |
0d864c4fbfbb7de747cc7ad6c2654a1bd7a18b3d | 7,763 | py | Python | test/benchmark.py | D3X/pysendfile | 38a173ba9b827d82b19abc2ff83ead36183ffc9a | [
"MIT"
] | 119 | 2015-01-06T10:26:35.000Z | 2021-12-03T06:22:47.000Z | test/benchmark.py | D3X/pysendfile | 38a173ba9b827d82b19abc2ff83ead36183ffc9a | [
"MIT"
] | 11 | 2015-02-06T18:01:26.000Z | 2022-03-14T09:51:28.000Z | test/benchmark.py | D3X/pysendfile | 38a173ba9b827d82b19abc2ff83ead36183ffc9a | [
"MIT"
] | 24 | 2015-01-13T20:08:46.000Z | 2021-07-30T13:45:15.000Z | #!/usr/bin/env python
# ======================================================================
# This software is distributed under the MIT license reproduced below:
#
# Copyright (C) 2009-2014 Giampaolo Rodola' <g.rodola@gmail.com>
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Giampaolo Rodola' not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# Giampaolo Rodola' DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT Giampaolo Rodola' BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""
A simple benchmark script which compares plain send() and sendfile()
performance in terms of CPU time spent and bytes transmitted in
one second.
This is what I get on my Linux 2.6.38 box, AMD dual core 1.6 GHz:
send()
cpu: 28.41 usec/pass
rate: 362.13 MB/sec
sendfile()
cpu: 11.25 usec/pass
rate: 848.56 MB/sec
Works with both python 2.X and 3.X.
"""
from __future__ import with_statement
import atexit
import contextlib
import errno
import itertools
import optparse
import os
import signal
import socket
import sys
import threading
import time
import timeit
from multiprocessing import Process
from sendfile import sendfile
# overridable defaults
HOST = "127.0.0.1"
PORT = 8022
BIGFILE = "$testfile1"
BIGFILE_SIZE = 1024 * 1024 * 1024 # 1 GB
BUFFER_SIZE = 65536
# python 3 compatibility layer
def b(s):
return bytes(s, 'ascii') if sys.version_info >= (3, ) else s
# python 2.5 compatibility
try:
next
except NameError:
def next(iterator):
return iterator.next()
def print_(s, hilite=False):
if hilite:
bold = '1'
s = '\x1b[%sm%s\x1b[0m' % (';'.join([bold]), s)
sys.stdout.write(s + "\n")
sys.stdout.flush()
def create_file(filename, size):
with open(filename, 'wb') as f:
bytes = 0
chunk = b("x") * BUFFER_SIZE
while 1:
f.write(chunk)
bytes += len(chunk)
if bytes >= size:
break
def safe_remove(file):
try:
os.remove(file)
except OSError:
pass
class Spinner(threading.Thread):
def run(self):
self._exit = False
self._spinner = itertools.cycle('-\|/')
while not self._exit:
sys.stdout.write(next(self._spinner) + "\b")
sys.stdout.flush()
time.sleep(.1)
def stop(self):
self._exit = True
self.join()
class Client:
def __init__(self):
self.sock = socket.socket()
self.sock.connect((HOST, PORT))
self.sock.settimeout(1)
def retr(self):
with contextlib.closing(self.sock):
while 1:
data = self.sock.recv(BUFFER_SIZE)
if not data:
break
def retr_for_1_sec(self):
with contextlib.closing(self.sock):
stop_at = time.time() + 1
bytes_recv = 0
while stop_at > time.time():
chunk = self.sock.recv(BUFFER_SIZE)
if not chunk:
assert 0
bytes_recv += len(chunk)
return bytes_recv
def start_server(use_sendfile, keep_sending=False):
"""A simple test server which sends a file once a client connects.
    use_sendfile decides whether to use sendfile() or plain send().
    If keep_sending is True, restart sending the file when EOF is reached.
"""
sock = socket.socket()
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((HOST, PORT))
sock.listen(1)
conn, addr = sock.accept()
sock.close()
file = open(BIGFILE, 'rb')
def on_exit(signum, fram):
file.close()
conn.close()
sys.exit(0)
signal.signal(signal.SIGTERM, on_exit)
signal.signal(signal.SIGINT, on_exit)
if not use_sendfile:
while 1:
chunk = file.read(BUFFER_SIZE)
if not chunk:
# EOF
if keep_sending:
file.seek(0)
continue
else:
break
conn.sendall(chunk)
else:
offset = 0
sockno = conn.fileno()
fileno = file.fileno()
while 1:
try:
sent = sendfile(sockno, fileno, offset, BUFFER_SIZE)
except OSError:
err = sys.exc_info()[1]
if err.errno in (errno.EAGAIN, errno.EBUSY):
continue
raise
else:
if not sent:
# EOF
if keep_sending:
offset = 0
continue
else:
break
else:
offset += sent
def main():
parser = optparse.OptionParser()
parser.add_option('-k', '--keepfile', action="store_true", default=False,
help="do not remove test file on exit")
options, args = parser.parse_args()
if not options.keepfile:
atexit.register(lambda: safe_remove(BIGFILE))
if not os.path.exists(BIGFILE) or os.path.getsize(BIGFILE) < BIGFILE_SIZE:
print_("creating big file...")
create_file(BIGFILE, BIGFILE_SIZE)
print_("starting benchmark...")
# CPU time: use sendfile()
server = Process(target=start_server, kwargs={"use_sendfile": True})
server.start()
time.sleep(0.1)
t1 = timeit.Timer(setup="from __main__ import Client; client = Client()",
stmt="client.retr()").timeit(number=1)
server.terminate()
server.join()
# CPU time: use send()
server = Process(target=start_server, kwargs={"use_sendfile": False})
server.start()
time.sleep(0.1)
t2 = timeit.Timer(setup="from __main__ import Client; client = Client()",
stmt="client.retr()").timeit(number=1)
server.terminate()
server.join()
# MB/sec: use sendfile()
server = Process(target=start_server, kwargs={"use_sendfile": True,
"keep_sending": True})
server.start()
time.sleep(0.1)
client = Client()
bytes1 = client.retr_for_1_sec()
server.terminate()
server.join()
# MB/sec: use sendfile()
server = Process(target=start_server, kwargs={"use_sendfile": False,
"keep_sending": True})
server.start()
time.sleep(0.1)
client = Client()
bytes2 = client.retr_for_1_sec()
server.terminate()
server.join()
print_(" ")
print_("send()", hilite=True)
print_(" cpu: %7.2f usec/pass" % (1000000 * t2 / 100000))
print_(" rate: %7.2f MB/sec" % round(bytes2 / 1024.0 / 1024.0, 2))
print_("")
print_("sendfile()", hilite=True)
print_(" cpu: %7.2f usec/pass" % (1000000 * t1 / 100000))
print_(" rate: %7.2f MB/sec" % round(bytes1 / 1024.0 / 1024.0, 2))
if __name__ == '__main__':
s = Spinner()
s.start()
try:
main()
finally:
s.stop()
| 28.435897 | 78 | 0.581863 |
ca10c41ae61c92a86d634aaba52857148355d2b3 | 5,574 | py | Python | PySpectra/spectra_reader.py | pmlrsg/PySpectra | 051d506ab5479a0b0cf0a9d2ec1d27eee3569d9e | [
"MIT"
] | 2 | 2017-09-14T17:56:23.000Z | 2018-07-06T11:20:49.000Z | PySpectra/spectra_reader.py | pmlrsg/PySpectra | 051d506ab5479a0b0cf0a9d2ec1d27eee3569d9e | [
"MIT"
] | 14 | 2015-11-16T09:29:06.000Z | 2019-11-08T11:53:38.000Z | PySpectra/spectra_reader.py | pmlrsg/PySpectra | 051d506ab5479a0b0cf0a9d2ec1d27eee3569d9e | [
"MIT"
] | 2 | 2019-01-31T13:02:50.000Z | 2019-11-28T20:02:19.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file has been created by ARSF Data Analysis Node and
# is licensed under the MIT Licence. A copy of this
# licence is available to download with this file.
#
# Author: Dan Clewley
# Created: 27/08/2015
import datetime
import numpy
from scipy.interpolate import interp1d
from scipy.integrate import trapz
# Set up a dictionary of time zone codes we use and offsets to UTC
# Use this to replace time zone with an offset which can be parsed by datetime
# using %z.
TIME_ZONE_DICT = {"BST" : "+0100",
"GMT" : "+0000",
"IST" : "+0530"}
class Spectra(object):
"""
Class to store spectra
and associated attributes
* file_name - Name of file spectra was extracted from
* wavelengths - Numpy array containing wavelengths
* values - Numpy array containing value for each wavelength
* pixel - Pixel (if spectra extracted from image)
* line - Line (if spectra extracted from image)
* latitude - Latitude of spectra (if available)
* longitude - Longitude of spectra (if available)
    * time - Acquisition time of spectra (UTC) as python time.struct_time
* wavelength_units - units of wavelengths (e.g., 'nm' or 'um')
* value_type - type of values (typically reflectance)
    * intergration_time - integration time for the instrument in seconds
* n_scans_average - number of scans averaged over (when instrument averages over multiple measurements)
* additional_metadata - dictionary containing additional metadata
"""
def __init__(self, wavelengths=None, values=None,
wavelength_units="", value_units=""):
self.file_name = None
self.wavelengths = wavelengths
self.values = values
self.pixel = None
self.line = None
self.latitude = None
self.longitude = None
self.time = None
self.wavelength_units = wavelength_units
self.value_units = value_units
self.value_scaling = 1
self.intergration_time = None
self.n_scans_average = 1
self.additional_metadata = {}
self.skip_header = None
def plot(self, label=None, **kwargs):
"""Produces a basic plot of the spectrum
Requires matplotlib to be installed
"""
from matplotlib.pyplot import plot, xlabel, ylabel
if label is None:
label = self.file_name
plot(self.wavelengths, self.values, label=label, **kwargs)
xlabel("Wavelength (%s)" % self.wavelength_units)
ylabel(self.value_units)
def get_time_difference(self, target_spectra):
"""
Get time difference between spectra and target spectra, returns
results in seconds of base_spectra - target_spectra
Requires:
* target_spectra: a spectral object
"""
# Get timedelta object (as both are datetime objects)
time_diff = self.time - target_spectra.time
return time_diff.total_seconds()
def _convolve(self, srf):
"""Actually does the convolution as specified in the 'convolve' function."""
if srf.value_units != "response":
raise ValueError('SRF must be a Spectra instance with value_units set to "response"')
# Interpolate to required wavelengths
f = interp1d(self.wavelengths, self.values)
at_srf_wavelengths = f(srf.wavelengths)
result = trapz(srf.values * at_srf_wavelengths,
srf.wavelengths) / trapz(srf.values, srf.wavelengths)
return result
def resample_wavelengths(self, new_wavelengths):
"""
Resample wavelengths in spectral object to match 'new_wavelengths'.
Replaces existing wavelengths with provided wavelengths and values with
those interpolated using new wavelengths.
Requires:
* new_wavelengths - numpy array containing new wavelengths
"""
# Interpolate to required wavelengths
new_values = numpy.interp(new_wavelengths, self.wavelengths, self.values)
self.wavelengths = new_wavelengths
self.values = new_values
def convolve(self, srf):
"""Convolve the spectrum with a Spectral Response Function.
This is generally used to convert a full spectrum to the
values that would be recorded from a sensor with wide spectral
bands (defined by the SRF given).
Requires:
* srf - Spectral Response Function to convolve to. This should be either
a single Spectra object with the value_units attribute set to "response",
or a list of such objects.
Pre-configured Spectra objects for the SRFs of various common sensors are
available in the `srf` module of this package.
Example:
# Convolve for one band
from PySpectra.srf import LANDSAT_OLI_B1
s.convolve(LANDSAT_OLI_B1)
# Convolve for multiple bands
from PySpectra.srf import LANDSAT_OLI_B1, LANDSAT_OLI_B2
        s.convolve([LANDSAT_OLI_B1, LANDSAT_OLI_B2])
# Convolve for all Landsat OLI bands
from PySpectra.srf import LANDSAT_OLI
s.convolve(LANDSAT_OLI)
"""
if isinstance(srf, list):
result = [self._convolve(s) for s in srf]
else:
result = self._convolve(srf)
return result
class SpectraReader(object):
"""
Abstract class for spectra
"""
def __init__(self):
self.spectra = Spectra()
def get_spectra(self, filename):
pass
| 31.670455 | 107 | 0.657876 |
697f63d0591519f2da900d9d10d8053e4fe45ad4 | 1,040 | py | Python | bookapi/forms.py | MohamadGamal/DjangoBookstore | 20f31297be1556208322f50d810f2d2f88bbb808 | [
"MIT"
] | 1 | 2017-05-01T10:04:24.000Z | 2017-05-01T10:04:24.000Z | bookapi/forms.py | MohamadGamal/DjangoBookstore | 20f31297be1556208322f50d810f2d2f88bbb808 | [
"MIT"
] | null | null | null | bookapi/forms.py | MohamadGamal/DjangoBookstore | 20f31297be1556208322f50d810f2d2f88bbb808 | [
"MIT"
] | null | null | null | from django import forms
from .models import User as nUser
from django.contrib.auth.models import User
class SignInForm(forms.ModelForm):
password=forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ['username', 'password']#'__all__'
class SignUpForm(forms.ModelForm):
password=forms.CharField(widget=forms.PasswordInput())
confirm_password=forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ['username','email', 'password','confirm_password']#'__all__'
def clean(self):
cleaned_data = super(SignUpForm, self).clean()
password = cleaned_data.get("password")
confirm_password = cleaned_data.get("confirm_password")
if password != confirm_password:
raise forms.ValidationError(
"password and confirm_password does not match"
)
class SignUpFormmuser(forms.ModelForm):
class Meta:
model = nUser
fields = ['bio','image'] | 31.515152 | 78 | 0.660577 |
35deaef17719da9fd0e2aeb2551c8332df38343d | 953 | py | Python | signal_processing/timbral_inspection/for_pres.py | sivaprakasaman/Python_Coding_Toolbox | 4e9ce98651f2a1c58d743f1375edd1e0d9c9ba76 | [
"MIT"
] | null | null | null | signal_processing/timbral_inspection/for_pres.py | sivaprakasaman/Python_Coding_Toolbox | 4e9ce98651f2a1c58d743f1375edd1e0d9c9ba76 | [
"MIT"
] | 1 | 2022-01-09T22:52:04.000Z | 2022-01-09T22:52:04.000Z | signal_processing/timbral_inspection/for_pres.py | sivaprakasaman/Python_Coding_Toolbox | 4e9ce98651f2a1c58d743f1375edd1e0d9c9ba76 | [
"MIT"
] | 1 | 2022-01-06T02:25:43.000Z | 2022-01-06T02:25:43.000Z | from signal_processing import pure_tone_complex, sound, magphase, get_spect
import matplotlib.pyplot as plt
from scipy.signal import spectrogram as sp
import scipy as scip
import numpy as np
fs = 44100
fc = 4e3;
freq_Hz = [440];
dur_sec = 1;
mags = [1];
phi = [0];
F_0 = pure_tone_complex(freq_Hz, fs, dur_sec, mags, phi)
env = np.less(0,F_0[1])*F_0[1];
car = pure_tone_complex([fc],fs,dur_sec,mags,phi);
sos = scip.signal.butter(4,.2*fc,'low',fs = fs, output = 'sos');
env = scip.signal.sosfilt(sos,env);
stim = env*car[1];
plt.figure()
plt.plot(F_0[0],F_0[1])
plt.xlim([0/440,5/440])
plt.title('Pure Tone')
plt.figure()
plt.plot(F_0[0],stim)
plt.xlim([0/440,5/440])
plt.title('Transposed Tone')
# sound(stim,fs,fname = 'transposed.wav',savefile = 1)
# sound(.5*F_0[1],fs,fname = 'pure_440.wav',savefile = 1)
get_spect(stim, fs, DR = 220, BW = 80, xlim = [0,1], ylim = [0,8e3], colormap = 'cividis', title = 'Spectrogram | Transposed Tone');
| 24.435897 | 132 | 0.681007 |
839d595ed98c4902295a23662f7fc60c393977d5 | 22,194 | py | Python | xhtml2pdf/util.py | marcelagz/xhtml2pdf | c988b0d2174c5e91ff1fd561c7a124ab7e5a1233 | [
"Apache-2.0"
] | null | null | null | xhtml2pdf/util.py | marcelagz/xhtml2pdf | c988b0d2174c5e91ff1fd561c7a124ab7e5a1233 | [
"Apache-2.0"
] | null | null | null | xhtml2pdf/util.py | marcelagz/xhtml2pdf | c988b0d2174c5e91ff1fd561c7a124ab7e5a1233 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
import sys
from copy import copy
import arabic_reshaper
import reportlab
import reportlab.pdfbase._cidfontdata
from bidi.algorithm import get_display
from reportlab.lib.colors import Color, toColor
from reportlab.lib.enums import TA_CENTER, TA_JUSTIFY, TA_LEFT, TA_RIGHT
from reportlab.lib.units import cm, inch
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
import xhtml2pdf.default
rgb_re = re.compile(
r"^.*?rgb[a]?[(]([0-9]+).*?([0-9]+).*?([0-9]+)(?:.*?(?:[01]\.(?:[0-9]+)))?[)].*?[ ]*$")
log = logging.getLogger("xhtml2pdf")
try:
import PyPDF3
except ImportError:
PyPDF3 = None
try:
from reportlab.graphics import renderPM
except ImportError:
renderPM = None
try:
from reportlab.graphics import renderSVG
except ImportError:
renderSVG = None
#=========================================================================
# Memoize decorator
#=========================================================================
class memoized(object):
"""
A kwargs-aware memoizer, better than the one in python :)
Don't pass in too large kwargs, since this turns them into a tuple of
tuples. Also, avoid mutable types (as usual for memoizers)
    What this does is to create a dictionary of {(*parameters):return value},
and uses it as a cache for subsequent calls to the same method.
It is especially useful for functions that don't rely on external variables
and that are called often. It's a perfect match for our getSize etc...
"""
def __init__(self, func):
self.cache = {}
self.func = func
self.__doc__ = self.func.__doc__ # To avoid great confusion
self.__name__ = self.func.__name__ # This also avoids great confusion
def __call__(self, *args, **kwargs):
# Make sure the following line is not actually slower than what you're
# trying to memoize
args_plus = tuple(kwargs.items())
key = (args, args_plus)
try:
if key not in self.cache:
res = self.func(*args, **kwargs)
self.cache[key] = res
return self.cache[key]
except TypeError:
# happens if any of the parameters is a list
return self.func(*args, **kwargs)
def ErrorMsg():
"""
Helper to get a nice traceback as string
"""
import traceback
limit = None
_type, value, tb = sys.exc_info()
_list = traceback.format_tb(tb, limit) + \
traceback.format_exception_only(_type, value)
return "Traceback (innermost last):\n" + "%-20s %s" % (
" ".join(_list[:-1]),
_list[-1])
def toList(value):
if type(value) not in (list, tuple):
return [value]
return list(value)
def transform_attrs(obj, keys, container, func, extras=None):
"""
    Applies one function to a set of keys, checking whether each key is in the container,
    and also maps CSS keys to ReportLab keys.
    extras = extra params for func; it will be called like func(*[param1, param2])
obj = frag
keys = [(reportlab, css), ... ]
container = cssAttr
"""
cpextras = extras
for reportlab, css in keys:
extras = cpextras
if extras is None:
extras = []
elif not isinstance(extras, list):
extras = [extras]
if css in container:
extras.insert(0, container[css])
setattr(obj,
reportlab,
func(*extras)
)
def copy_attrs(obj1, obj2, attrs):
"""
    Copies a list of attributes from object2 to object1.
    Useful for copying CSS attributes to a fragment
"""
for attr in attrs:
value = getattr(obj2, attr) if hasattr(obj2, attr) else None
if value is None and isinstance(obj2, dict) and attr in obj2:
value = obj2[attr]
setattr(obj1, attr, value)
def set_value(obj, attrs, value, _copy=False):
"""
    Sets the same value on a list of attributes
"""
for attr in attrs:
if _copy:
value = copy(value)
setattr(obj, attr, value)
@memoized
def getColor(value, default=None):
"""
Convert to color value.
This returns a Color object instance from a text bit.
"""
if value is None:
return
if isinstance(value, Color):
return value
value = str(value).strip().lower()
if value == "transparent" or value == "none":
return default
if value in COLOR_BY_NAME:
return COLOR_BY_NAME[value]
if value.startswith("#") and len(value) == 4:
value = "#" + value[1] + value[1] + \
value[2] + value[2] + value[3] + value[3]
elif rgb_re.search(value):
# e.g., value = "<css function: rgb(153, 51, 153)>", go figure:
r, g, b = [int(x) for x in rgb_re.search(value).groups()]
value = "#%02x%02x%02x" % (r, g, b)
else:
# Shrug
pass
return toColor(value, default) # Calling the reportlab function
def getBorderStyle(value, default=None):
if value and (str(value).lower() not in ("none", "hidden")):
return value
return default
mm = cm / 10.0
dpi96 = (1.0 / 96.0 * inch)
_absoluteSizeTable = {
"1": 50.0 / 100.0,
"xx-small": 50.0 / 100.0,
"x-small": 50.0 / 100.0,
"2": 75.0 / 100.0,
"small": 75.0 / 100.0,
"3": 100.0 / 100.0,
"medium": 100.0 / 100.0,
"4": 125.0 / 100.0,
"large": 125.0 / 100.0,
"5": 150.0 / 100.0,
"x-large": 150.0 / 100.0,
"6": 175.0 / 100.0,
"xx-large": 175.0 / 100.0,
"7": 200.0 / 100.0,
"xxx-large": 200.0 / 100.0,
}
_relativeSizeTable = {
"larger": 1.25,
"smaller": 0.75,
"+4": 200.0 / 100.0,
"+3": 175.0 / 100.0,
"+2": 150.0 / 100.0,
"+1": 125.0 / 100.0,
"-1": 75.0 / 100.0,
"-2": 50.0 / 100.0,
"-3": 25.0 / 100.0,
}
MIN_FONT_SIZE = 1.0
@memoized
def getSize(value, relative=0, base=None, default=0.0):
"""
Converts strings to standard sizes.
    That is, the function takes a string of CSS size ('12pt', '1cm' and so on)
    and converts it into a float in a standard unit (in our case, points).
>>> getSize('12pt')
12.0
>>> getSize('1cm')
28.346456692913385
"""
try:
original = value
if value is None:
return relative
elif type(value) is float:
return value
elif isinstance(value, int):
return float(value)
elif type(value) in (tuple, list):
value = "".join(value)
value = str(value).strip().lower().replace(",", ".")
if value[-2:] == 'cm':
return float(value[:-2].strip()) * cm
elif value[-2:] == 'mm':
return float(value[:-2].strip()) * mm # 1mm = 0.1cm
elif value[-2:] == 'in':
return float(value[:-2].strip()) * inch # 1pt == 1/72inch
elif value[-2:] == 'pt':
return float(value[:-2].strip())
elif value[-2:] == 'pc':
return float(value[:-2].strip()) * 12.0 # 1pc == 12pt
elif value[-2:] == 'px':
# XXX W3C says, use 96pdi
# http://www.w3.org/TR/CSS21/syndata.html#length-units
return float(value[:-2].strip()) * dpi96
elif value in ("none", "0", '0.0', "auto"):
return 0.0
elif relative:
if value[-3:] == 'rem': # XXX
# 1rem = 1 * fontSize
return float(value[:-3].strip()) * relative
elif value[-2:] == 'em': # XXX
# 1em = 1 * fontSize
return float(value[:-2].strip()) * relative
elif value[-2:] == 'ex': # XXX
# 1ex = 1/2 fontSize
return float(value[:-2].strip()) * (relative / 2.0)
elif value[-1:] == '%':
# 1% = (fontSize * 1) / 100
return (relative * float(value[:-1].strip())) / 100.0
elif value in ("normal", "inherit"):
return relative
elif value in _relativeSizeTable:
if base:
return max(MIN_FONT_SIZE, base * _relativeSizeTable[value])
return max(MIN_FONT_SIZE, relative * _relativeSizeTable[value])
elif value in _absoluteSizeTable:
if base:
return max(MIN_FONT_SIZE, base * _absoluteSizeTable[value])
return max(MIN_FONT_SIZE, relative * _absoluteSizeTable[value])
else:
return max(MIN_FONT_SIZE, relative * float(value))
try:
value = float(value)
except ValueError:
log.warning("getSize: Not a float %r", value)
return default # value = 0
return max(0, value)
except Exception:
log.warning("getSize %r %r", original, relative, exc_info=1)
return default
@memoized
def getCoords(x, y, w, h, pagesize):
"""
As a stupid programmer I like to use the upper left
    corner of the document as the 0,0 coords, therefore
we need to do some fancy calculations
"""
#~ print pagesize
ax, ay = pagesize
if x < 0:
x = ax + x
if y < 0:
y = ay + y
if w is not None and h is not None:
if w <= 0:
w = (ax - x + w)
if h <= 0:
h = (ay - y + h)
return x, (ay - y - h), w, h
return x, (ay - y)
@memoized
def getBox(box, pagesize):
"""
Parse sizes by corners in the form:
<X-Left> <Y-Upper> <Width> <Height>
    The last two values with negative values are interpreted as offsets from
the right and lower border.
"""
box = str(box).split()
if len(box) != 4:
raise Exception("box not defined right way")
x, y, w, h = [getSize(pos) for pos in box]
return getCoords(x, y, w, h, pagesize)
def getFrameDimensions(data, page_width, page_height):
"""Calculate dimensions of a frame
Returns left, top, width and height of the frame in points.
"""
box = data.get("-pdf-frame-box", [])
if len(box) == 4:
return [getSize(x) for x in box]
top = getSize(data.get("top", 0))
left = getSize(data.get("left", 0))
bottom = getSize(data.get("bottom", 0))
right = getSize(data.get("right", 0))
if "height" in data:
height = getSize(data["height"])
if "top" in data:
top = getSize(data["top"])
bottom = page_height - (top + height)
elif "bottom" in data:
bottom = getSize(data["bottom"])
top = page_height - (bottom + height)
if "width" in data:
width = getSize(data["width"])
if "left" in data:
left = getSize(data["left"])
right = page_width - (left + width)
elif "right" in data:
right = getSize(data["right"])
left = page_width - (right + width)
top += getSize(data.get("margin-top", 0))
left += getSize(data.get("margin-left", 0))
bottom += getSize(data.get("margin-bottom", 0))
right += getSize(data.get("margin-right", 0))
width = page_width - (left + right)
height = page_height - (top + bottom)
return left, top, width, height
@memoized
def getPos(position, pagesize):
"""
Pair of coordinates
"""
position = str(position).split()
if len(position) != 2:
raise Exception("position not defined right way")
x, y = [getSize(pos) for pos in position]
return getCoords(x, y, None, None, pagesize)
def getBool(s):
" Is it a boolean? "
return str(s).lower() in ("y", "yes", "1", "true")
_uid = 0
def getUID():
" Unique ID "
global _uid
_uid += 1
return str(_uid)
_alignments = {
"left": TA_LEFT,
"center": TA_CENTER,
"middle": TA_CENTER,
"right": TA_RIGHT,
"justify": TA_JUSTIFY,
}
def getAlign(value, default=TA_LEFT):
return _alignments.get(str(value).lower(), default)
_rx_datauri = re.compile(
"^data:(?P<mime>[a-z]+/[a-z]+);base64,(?P<data>.*)$", re.M | re.DOTALL)
COLOR_BY_NAME = {
'activeborder': Color(212, 208, 200),
'activecaption': Color(10, 36, 106),
'aliceblue': Color(.941176, .972549, 1),
'antiquewhite': Color(.980392, .921569, .843137),
'appworkspace': Color(128, 128, 128),
'aqua': Color(0, 1, 1),
'aquamarine': Color(.498039, 1, .831373),
'azure': Color(.941176, 1, 1),
'background': Color(58, 110, 165),
'beige': Color(.960784, .960784, .862745),
'bisque': Color(1, .894118, .768627),
'black': Color(0, 0, 0),
'blanchedalmond': Color(1, .921569, .803922),
'blue': Color(0, 0, 1),
'blueviolet': Color(.541176, .168627, .886275),
'brown': Color(.647059, .164706, .164706),
'burlywood': Color(.870588, .721569, .529412),
'buttonface': Color(212, 208, 200),
'buttonhighlight': Color(255, 255, 255),
'buttonshadow': Color(128, 128, 128),
'buttontext': Color(0, 0, 0),
'cadetblue': Color(.372549, .619608, .627451),
'captiontext': Color(255, 255, 255),
'chartreuse': Color(.498039, 1, 0),
'chocolate': Color(.823529, .411765, .117647),
'coral': Color(1, .498039, .313725),
'cornflowerblue': Color(.392157, .584314, .929412),
'cornsilk': Color(1, .972549, .862745),
'crimson': Color(.862745, .078431, .235294),
'cyan': Color(0, 1, 1),
'darkblue': Color(0, 0, .545098),
'darkcyan': Color(0, .545098, .545098),
'darkgoldenrod': Color(.721569, .52549, .043137),
'darkgray': Color(.662745, .662745, .662745),
'darkgreen': Color(0, .392157, 0),
'darkgrey': Color(.662745, .662745, .662745),
'darkkhaki': Color(.741176, .717647, .419608),
'darkmagenta': Color(.545098, 0, .545098),
'darkolivegreen': Color(.333333, .419608, .184314),
'darkorange': Color(1, .54902, 0),
'darkorchid': Color(.6, .196078, .8),
'darkred': Color(.545098, 0, 0),
'darksalmon': Color(.913725, .588235, .478431),
'darkseagreen': Color(.560784, .737255, .560784),
'darkslateblue': Color(.282353, .239216, .545098),
'darkslategray': Color(.184314, .309804, .309804),
'darkslategrey': Color(.184314, .309804, .309804),
'darkturquoise': Color(0, .807843, .819608),
'darkviolet': Color(.580392, 0, .827451),
'deeppink': Color(1, .078431, .576471),
'deepskyblue': Color(0, .74902, 1),
'dimgray': Color(.411765, .411765, .411765),
'dimgrey': Color(.411765, .411765, .411765),
'dodgerblue': Color(.117647, .564706, 1),
'firebrick': Color(.698039, .133333, .133333),
'floralwhite': Color(1, .980392, .941176),
'forestgreen': Color(.133333, .545098, .133333),
'fuchsia': Color(1, 0, 1),
'gainsboro': Color(.862745, .862745, .862745),
'ghostwhite': Color(.972549, .972549, 1),
'gold': Color(1, .843137, 0),
'goldenrod': Color(.854902, .647059, .12549),
'gray': Color(.501961, .501961, .501961),
'graytext': Color(128, 128, 128),
'green': Color(0, .501961, 0),
'greenyellow': Color(.678431, 1, .184314),
'grey': Color(.501961, .501961, .501961),
'highlight': Color(10, 36, 106),
'highlighttext': Color(255, 255, 255),
'honeydew': Color(.941176, 1, .941176),
'hotpink': Color(1, .411765, .705882),
'inactiveborder': Color(212, 208, 200),
'inactivecaption': Color(128, 128, 128),
'inactivecaptiontext': Color(212, 208, 200),
'indianred': Color(.803922, .360784, .360784),
'indigo': Color(.294118, 0, .509804),
'infobackground': Color(255, 255, 225),
'infotext': Color(0, 0, 0),
'ivory': Color(1, 1, .941176),
'khaki': Color(.941176, .901961, .54902),
'lavender': Color(.901961, .901961, .980392),
'lavenderblush': Color(1, .941176, .960784),
'lawngreen': Color(.486275, .988235, 0),
'lemonchiffon': Color(1, .980392, .803922),
'lightblue': Color(.678431, .847059, .901961),
'lightcoral': Color(.941176, .501961, .501961),
'lightcyan': Color(.878431, 1, 1),
'lightgoldenrodyellow': Color(.980392, .980392, .823529),
'lightgray': Color(.827451, .827451, .827451),
'lightgreen': Color(.564706, .933333, .564706),
'lightgrey': Color(.827451, .827451, .827451),
'lightpink': Color(1, .713725, .756863),
'lightsalmon': Color(1, .627451, .478431),
'lightseagreen': Color(.12549, .698039, .666667),
'lightskyblue': Color(.529412, .807843, .980392),
'lightslategray': Color(.466667, .533333, .6),
'lightslategrey': Color(.466667, .533333, .6),
'lightsteelblue': Color(.690196, .768627, .870588),
'lightyellow': Color(1, 1, .878431),
'lime': Color(0, 1, 0),
'limegreen': Color(.196078, .803922, .196078),
'linen': Color(.980392, .941176, .901961),
'magenta': Color(1, 0, 1),
'maroon': Color(.501961, 0, 0),
'mediumaquamarine': Color(.4, .803922, .666667),
'mediumblue': Color(0, 0, .803922),
'mediumorchid': Color(.729412, .333333, .827451),
'mediumpurple': Color(.576471, .439216, .858824),
'mediumseagreen': Color(.235294, .701961, .443137),
'mediumslateblue': Color(.482353, .407843, .933333),
'mediumspringgreen': Color(0, .980392, .603922),
'mediumturquoise': Color(.282353, .819608, .8),
'mediumvioletred': Color(.780392, .082353, .521569),
'menu': Color(212, 208, 200),
'menutext': Color(0, 0, 0),
'midnightblue': Color(.098039, .098039, .439216),
'mintcream': Color(.960784, 1, .980392),
'mistyrose': Color(1, .894118, .882353),
'moccasin': Color(1, .894118, .709804),
'navajowhite': Color(1, .870588, .678431),
'navy': Color(0, 0, .501961),
'oldlace': Color(.992157, .960784, .901961),
'olive': Color(.501961, .501961, 0),
'olivedrab': Color(.419608, .556863, .137255),
'orange': Color(1, .647059, 0),
'orangered': Color(1, .270588, 0),
'orchid': Color(.854902, .439216, .839216),
'palegoldenrod': Color(.933333, .909804, .666667),
'palegreen': Color(.596078, .984314, .596078),
'paleturquoise': Color(.686275, .933333, .933333),
'palevioletred': Color(.858824, .439216, .576471),
'papayawhip': Color(1, .937255, .835294),
'peachpuff': Color(1, .854902, .72549),
'peru': Color(.803922, .521569, .247059),
'pink': Color(1, .752941, .796078),
'plum': Color(.866667, .627451, .866667),
'powderblue': Color(.690196, .878431, .901961),
'purple': Color(.501961, 0, .501961),
'red': Color(1, 0, 0),
'rosybrown': Color(.737255, .560784, .560784),
'royalblue': Color(.254902, .411765, .882353),
'saddlebrown': Color(.545098, .270588, .07451),
'salmon': Color(.980392, .501961, .447059),
'sandybrown': Color(.956863, .643137, .376471),
'scrollbar': Color(212, 208, 200),
'seagreen': Color(.180392, .545098, .341176),
'seashell': Color(1, .960784, .933333),
'sienna': Color(.627451, .321569, .176471),
'silver': Color(.752941, .752941, .752941),
'skyblue': Color(.529412, .807843, .921569),
'slateblue': Color(.415686, .352941, .803922),
'slategray': Color(.439216, .501961, .564706),
'slategrey': Color(.439216, .501961, .564706),
'snow': Color(1, .980392, .980392),
'springgreen': Color(0, 1, .498039),
'steelblue': Color(.27451, .509804, .705882),
'tan': Color(.823529, .705882, .54902),
'teal': Color(0, .501961, .501961),
'thistle': Color(.847059, .74902, .847059),
'threeddarkshadow': Color(64, 64, 64),
'threedface': Color(212, 208, 200),
'threedhighlight': Color(255, 255, 255),
'threedlightshadow': Color(212, 208, 200),
'threedshadow': Color(128, 128, 128),
'tomato': Color(1, .388235, .278431),
'turquoise': Color(.25098, .878431, .815686),
'violet': Color(.933333, .509804, .933333),
'wheat': Color(.960784, .870588, .701961),
'white': Color(1, 1, 1),
'whitesmoke': Color(.960784, .960784, .960784),
'window': Color(255, 255, 255),
'windowframe': Color(0, 0, 0),
'windowtext': Color(0, 0, 0),
'yellow': Color(1, 1, 0),
'yellowgreen': Color(.603922, .803922, .196078)
}
def get_default_asian_font():
lower_font_list = []
upper_font_list = []
font_dict = copy(reportlab.pdfbase._cidfontdata.defaultUnicodeEncodings)
fonts = font_dict.keys()
for font in fonts:
upper_font_list.append(font)
lower_font_list.append(font.lower())
default_asian_font = {lower_font_list[i]: upper_font_list[i] for i in range(len(lower_font_list))}
return default_asian_font
def set_asian_fonts(fontname):
font_dict = copy(reportlab.pdfbase._cidfontdata.defaultUnicodeEncodings)
fonts = font_dict.keys()
if fontname in fonts:
pdfmetrics.registerFont(UnicodeCIDFont(fontname))
def detect_language(name):
asian_language_list = xhtml2pdf.default.DEFAULT_LANGUAGE_LIST
if name in asian_language_list:
return name
def arabic_format(text, language):
# Note: right now all of the languages are treated the same way.
# But maybe in the future we have to for example implement something
# for "hebrew" that isn't used in "arabic"
if detect_language(language) in ('arabic', 'hebrew', 'persian', 'urdu', 'pashto', 'sindhi'):
ar = arabic_reshaper.reshape(text)
return get_display(ar)
else:
return None
def frag_text_language_check(context, frag_text):
if hasattr(context, 'language'):
language = context.__getattribute__('language')
detect_language_result = arabic_format(frag_text, language)
if detect_language_result:
return detect_language_result
| 33.93578 | 102 | 0.590971 |
07dd7cbe7b67fb5f32c3dbfce30eb29ea6e00c5c | 5,391 | py | Python | System/Mount Disk Image.app/Resources/mount_md_gui.py | alphamodh0/Utilities | d02caf12728bd2aa74eeed0952975798c7ac7cd1 | [
"BSD-2-Clause"
] | 1 | 2020-09-27T15:01:29.000Z | 2020-09-27T15:01:29.000Z | System/Mount Disk Image.app/Resources/mount_md_gui.py | alphamodh0/Utilities | d02caf12728bd2aa74eeed0952975798c7ac7cd1 | [
"BSD-2-Clause"
] | null | null | null | System/Mount Disk Image.app/Resources/mount_md_gui.py | alphamodh0/Utilities | d02caf12728bd2aa74eeed0952975798c7ac7cd1 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
import os, sys, socket, subprocess, time
# Translate this application using Qt .ts files without the need for compilation
import tstranslator
tstr = tstranslator.TsTranslator(os.path.dirname(__file__) + "/i18n", "")
def tr(input):
return tstr.tr(input)
from PyQt5 import QtWidgets, QtGui, QtCore
class GUI(object):
def __init__(self):
app = QtWidgets.QApplication(sys.argv)
if len(sys.argv) < 2:
filedialog = QtWidgets.QFileDialog()
filedialog.setDefaultSuffix("iso")
filedialog.setNameFilter(tr("Disk images (*.iso *.img *.ufs *.uzip);;All files (*.*)"))
filename = None
if filedialog.exec_():
filename = filedialog.selectedFiles()[0]
if not filename:
exit(0)
else:
self.image = filename
else:
self.image = sys.argv[1]
self.iconfile = None
self.file_symlink_resolved = os.path.join(sys.path[0], os.path.basename(os.path.realpath(sys.argv[0])))
for file in os.listdir(os.path.dirname(self.file_symlink_resolved)):
if file.endswith(".png"):
self.iconfile = os.path.dirname(self.file_symlink_resolved) + "/" + file
break
self.disk_image_iconfile = os.path.dirname(self.file_symlink_resolved) + "/diskimage.png"
self.prepare_progress_window()
if not os.path.exists(self.image):
self.showFatalError(tr("%s does not exist" % self.image))
self.process = QtCore.QProcess()
self.process.setProgram("mount_md") # chmod 6755 /sbin/mdconfig so that it runs as root:wheel
self.process.setArguments([self.image])
print(self.process.program() + " " + " ".join(self.process.arguments()))
codec = QtCore.QTextCodec.codecForLocale()
self.process._decoder_stdout = codec.makeDecoder()
self.process._decoder_stderr = codec.makeDecoder()
self.process.readyReadStandardOutput.connect(self._ready_read_standard_output)
self.process.readyReadStandardError.connect(self._ready_read_standard_error)
self.process.start()
while True:
QtWidgets.QApplication.processEvents() # Important trick so that the app stays responsive without the need for threading!
time.sleep(0.001)
def _ready_read_standard_output(self):
text = self.process._decoder_stdout.toUnicode(self.process.readAllStandardOutput())
lines = text.split("\n")
for line in lines:
line = line.strip()
print(line)
if line.startswith("Attaching"):
self.progress_window.open()
self.progress_window.show()
if line.startswith("Attached"):
# self.progress_window.hide()
timer = QtCore.QTimer()
timer.singleShot(2000, self.progress_window.hide)
if line.startswith("Removed"):
self.process.waitForFinished()
sys.exit(0)
def _ready_read_standard_error(self):
pass
def showNonfatalError(self, text):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setWindowTitle(" ")
msg.setText(text)
msg.exec_()
def showFatalError(self, text):
msg = QtWidgets.QMessageBox()
msg.setIcon(QtWidgets.QMessageBox.Critical)
msg.setWindowTitle(" ")
msg.setText(text)
msg.exec_()
sys.exit(1)
def prepare_progress_window(self):
label = os.path.basename(self.image)
self.progress_window = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Information, " ", label, QtWidgets.QMessageBox.NoButton)
self.progress_window.setWindowFlag(QtCore.Qt.WindowCloseButtonHint, False) # Why does KWin still show it, even if unclickable?
self.progress_window.setStyleSheet("QDialogButtonBox,QTextEdit{min-width: 500px; } QLabel{min-height: 50px;} QProgressBar{min-width: 410px;}") # FIXME: Do this without hardcoding 410px
self.progress_window.setStandardButtons(QtWidgets.QMessageBox.NoButton)
self.progress_window.setIconPixmap(QtGui.QPixmap(self.disk_image_iconfile))
self.progress_window.layout().setAlignment(QtCore.Qt.AlignTop)
self.progress = QtWidgets.QProgressBar()
self.progress.setMaximum(0) # Indeterminate
self.progress.setMinimum(0)
self.progress.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
# Add the progress bar at the bottom (last row + 1) and first column with column span
# self.progress_window.layout().addWidget(self.progress, self.progress_window.layout().rowCount(), 0, 1, self.progress_window.layout().columnCount(), QtCore.Qt.AlignCenter)
self.progress_window.layout().addWidget(self.progress, 1, 1, 1, self.progress_window.layout().columnCount(),
QtCore.Qt.AlignCenter)
self.progress_window.layout().addWidget(QtWidgets.QLabel(), 1, 1, 1, self.progress_window.layout().columnCount(),
QtCore.Qt.AlignCenter)
def quit(self):
sys.exit(0)
if __name__ == "__main__":
g = GUI()
| 41.790698 | 192 | 0.638101 |
99c6c956a378afcda1247b0b075306e45a9bc8ff | 27 | py | Python | amnesia/modules/country/__init__.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 4 | 2015-05-08T10:57:56.000Z | 2021-05-17T04:32:11.000Z | amnesia/modules/country/__init__.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 6 | 2019-12-26T16:43:41.000Z | 2022-02-28T11:07:54.000Z | amnesia/modules/country/__init__.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 1 | 2019-09-23T14:08:11.000Z | 2019-09-23T14:08:11.000Z | from .model import Country
| 13.5 | 26 | 0.814815 |
88d7054c86555619aa9610b3f2cb7c5bea91103f | 7,899 | py | Python | backend/betterfly_34201/settings.py | crowdbotics-apps/betterfly-34201 | 54aa9710bc95f9c065e1faa63e19ca5df58fda2f | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/betterfly_34201/settings.py | crowdbotics-apps/betterfly-34201 | 54aa9710bc95f9c065e1faa63e19ca5df58fda2f | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/betterfly_34201/settings.py | crowdbotics-apps/betterfly-34201 | 54aa9710bc95f9c065e1faa63e19ca5df58fda2f | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
Django settings for betterfly_34201 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import io
import environ
import logging
import google.auth
from google.cloud import secretmanager
from google.auth.exceptions import DefaultCredentialsError
from google.api_core.exceptions import PermissionDenied
from modules.manifest import get_modules
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
env_file = os.path.join(BASE_DIR, ".env")
env = environ.Env()
env.read_env(env_file)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
try:
# Pull secrets from Secret Manager
_, project = google.auth.default()
client = secretmanager.SecretManagerServiceClient()
settings_name = os.environ.get("SETTINGS_NAME", "django_settings")
name = client.secret_version_path(project, settings_name, "latest")
payload = client.access_secret_version(name=name).payload.data.decode("UTF-8")
env.read_env(io.StringIO(payload))
except (DefaultCredentialsError, PermissionDenied):
pass
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'betterfly_34201.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'betterfly_34201.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
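# Enable S3-backed storage only when all four AWS settings below are provided;
# otherwise the project keeps the default (local) storage backends.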
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
# GCP config
GS_BUCKET_NAME = env.str("GS_BUCKET_NAME", "")
if GS_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
GS_DEFAULT_ACL = "publicRead"
| 30.380769 | 112 | 0.736802 |
6ac96e783804f54ee5612a514605f70cc98540e4 | 234,780 | py | Python | src/sage/combinat/words/finite_word.py | vbraun/sage | 07d6c37d18811e2b377a9689790a7c5e24da16ba | [
"BSL-1.0"
] | 3 | 2016-06-19T14:48:31.000Z | 2022-01-28T08:46:01.000Z | src/sage/combinat/words/finite_word.py | vbraun/sage | 07d6c37d18811e2b377a9689790a7c5e24da16ba | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/words/finite_word.py | vbraun/sage | 07d6c37d18811e2b377a9689790a7c5e24da16ba | [
"BSL-1.0"
] | 7 | 2021-11-08T10:01:59.000Z | 2022-03-03T11:25:52.000Z | # -*- coding: utf-8 -*-
r"""
Finite word
AUTHORS:
- Arnaud Bergeron
- Amy Glen
- Sébastien Labbé
- Franco Saliola
- Julien Leroy (March 2010): reduced_rauzy_graph
EXAMPLES:
=========================
Creation of a finite word
=========================
Finite words from Python strings, lists and tuples::
sage: Word("abbabaab")
word: abbabaab
sage: Word([0, 1, 1, 0, 1, 0, 0, 1])
word: 01101001
sage: Word( ('a', 0, 5, 7, 'b', 9, 8) )
word: a057b98
Finite words from functions::
sage: f = lambda n : n%3
sage: Word(f, length=13)
word: 0120120120120
Finite words from iterators::
sage: from itertools import count
sage: Word(count(), length=10)
word: 0123456789
::
sage: Word( iter('abbccdef') )
word: abbccdef
Finite words from words via concatenation::
sage: u = Word("abcccabba")
sage: v = Word([0, 4, 8, 8, 3])
sage: u * v
word: abcccabba04883
sage: v * u
word: 04883abcccabba
sage: u + v
word: abcccabba04883
sage: u^3 * v^(8/5)
word: abcccabbaabcccabbaabcccabba04883048
Finite words from infinite words::
sage: vv = v^Infinity
sage: vv[10000:10015]
word: 048830488304883
Finite words in a specific combinatorial class::
sage: W = Words("ab")
sage: W
Finite and infinite words over {'a', 'b'}
sage: W("abbabaab")
word: abbabaab
sage: W(["a","b","b","a","b","a","a","b"])
word: abbabaab
sage: W( iter('ababab') )
word: ababab
Finite word as the image under a morphism::
sage: m = WordMorphism({0:[4,4,5,0],5:[0,5,5],4:[4,0,0,0]})
sage: m(0)
word: 4450
sage: m(0, order=2)
word: 400040000554450
sage: m(0, order=3)
word: 4000445044504450400044504450445044500550...
.. NOTE::
The following two finite words have the same string representation::
sage: w = Word('010120')
sage: z = Word([0, 1, 0, 1, 2, 0])
sage: w
word: 010120
sage: z
word: 010120
but are not equal::
sage: w == z
False
Indeed, w and z are defined on different alphabets::
sage: w[2]
'0'
sage: z[2]
0
========================
Functions and algorithms
========================
There are more than 100 functions defined on a finite word. Here are some
of them::
sage: w = Word('abaabbba'); w
word: abaabbba
sage: w.is_palindrome()
False
sage: w.is_lyndon()
False
sage: w.number_of_factors()
28
sage: w.critical_exponent()
3
::
sage: print(w.lyndon_factorization())
(ab, aabbb, a)
sage: print(w.crochemore_factorization())
(a, b, a, ab, bb, a)
::
sage: st = w.suffix_tree()
sage: st
Implicit Suffix Tree of the word: abaabbba
sage: st.show(word_labels=True)
::
sage: T = words.FibonacciWord('ab')
sage: T.longest_common_prefix(Word('abaabababbbbbb'))
word: abaababa
As matrix and many other sage objects, words have a parent::
sage: u = Word('xyxxyxyyy')
sage: u.parent()
Finite words over Set of Python objects of class 'object'
::
sage: v = Word('xyxxyxyyy', alphabet='xy')
sage: v.parent()
Finite words over {'x', 'y'}
========================
Factors and Rauzy Graphs
========================
Enumeration of factors, the successive values returned by ``next(it)``
can appear in a different order depending on hardware. Therefore we
mark the three first results of the test ``random``. The important test
is that the iteration stops properly on the fourth call::
sage: w = Word([4,5,6])^7
sage: it = w.factor_iterator(4)
sage: next(it) # random
word: 6456
sage: next(it) # random
word: 5645
sage: next(it) # random
word: 4564
sage: next(it)
Traceback (most recent call last):
...
StopIteration
The set of factors::
sage: sorted(w.factor_set(3))
[word: 456, word: 564, word: 645]
sage: sorted(w.factor_set(4))
[word: 4564, word: 5645, word: 6456]
sage: w.factor_set().cardinality()
61
Rauzy graphs::
sage: f = words.FibonacciWord()[:30]
sage: f.rauzy_graph(4)
Looped digraph on 5 vertices
sage: f.reduced_rauzy_graph(4)
Looped multi-digraph on 2 vertices
Left-special and bispecial factors::
sage: f.number_of_left_special_factors(7)
1
sage: f.bispecial_factors()
[word: , word: 0, word: 010, word: 010010, word: 01001010010]
"""
#*****************************************************************************
# Copyright (C) 2008 Arnaud Bergeron <abergeron@gmail.com>,
# 2008 Amy Glen <amy.glen@gmail.com>,
# 2008-2012 Sébastien Labbé <slabqc@gmail.com>,
# 2008-2010 Franco Saliola <saliola@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import print_function, absolute_import
from builtins import zip
from six.moves import range
from collections import defaultdict
from itertools import islice, cycle
from sage.combinat.words.abstract_word import Word_class
from sage.combinat.words.words import Words
from sage.misc.cachefunc import cached_method
from sage.combinat.words.word_options import word_options
from sage.rings.all import Integer, Infinity, ZZ
from sage.sets.set import Set
from sage.misc.superseded import deprecated_function_alias
class FiniteWord_class(Word_class):
def __str__(self):
r"""
Return the full (not truncated) string representation of the word
without identifier.
TESTS::
sage: Word('abc').__str__()
'abc'
sage: Word([0, 1, 0, 0, 1] * 10).__str__()
'01001010010100101001010010100101001010010100101001'
sage: Word([0,1,10,101]).__str__()
'0,1,10,101'
Insertion into a ``str``::
sage: w = Word(range(5))
sage: "Let's insert the word w = %s in this string." % w
"Let's insert the word w = 01234 in this string."
Using ``LatexExpr``::
sage: from sage.misc.latex import LatexExpr
sage: LatexExpr(w)
01234
With the ``print`` statement::
sage: print(w)
01234
No truncation is done for finite words::
sage: w = Word([i % 5 for i in range(60)])
sage: print(w)
012340123401234012340123401234012340123401234012340123401234
"""
global word_options
if word_options['display'] == 'string':
ls = word_options['letter_separator']
letters = [str(_) for _ in self]
if all(len(a)==1 for a in letters):
return ''.join(letters)
else:
return ls.join(letters)
elif word_options['display'] == 'list':
return str(list(self))
def _repr_(self):
r"""
Return a string representation of ``self``.
TESTS::
sage: Word(range(10))._repr_()
'word: 0123456789'
sage: Word(range(100))._repr_()
'word: 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,...'
"""
global word_options
if word_options['old_repr']:
if word_options['truncate'] and \
self.length() > word_options['truncate_length']:
return "Finite word of length %s over %s" % (self.length(), str(self.parent().alphabet())[17:])
return word_options['identifier'] + self.string_rep()
def coerce(self, other):
r"""
Try to return a pair of words with a common parent; raise an
exception if this is not possible.
This function begins by checking if both words have the same
parent. If this is the case, then no work is done and both words
are returned as-is.
Otherwise it will attempt to convert ``other`` to the domain of ``self``.
If that fails, it will attempt to convert ``self`` to the domain of
``other``. If both attempts fail, it raises a ``TypeError`` to signal
failure.
EXAMPLES::
sage: W1 = Words('abc'); W2 = Words('ab')
sage: w1 = W1('abc'); w2 = W2('abba'); w3 = W1('baab')
sage: w1.parent() is w2.parent()
False
sage: a, b = w1.coerce(w2)
sage: a.parent() is b.parent()
True
sage: w1.parent() is w2.parent()
False
"""
if self.parent() != other.parent():
try:
other = self.parent()(other)
other.parent()._check(other, length=None)
except Exception:
try:
self = other.parent()(self)
self.parent()._check(self, length=None)
except Exception:
raise TypeError("no coercion rule between %r and %r" % (self.parent(), other.parent()))
return self, other
def __hash__(self):
r"""
Return the hash for this word.
TESTS::
sage: h = hash(Word('abc')) # indirect test
sage: Word('abc').__hash__() == Word('abc').__hash__()
True
"""
if self._hash is None:
res = 5381
for s in self._to_integer_iterator():
res = ((res << 5) + res) + s
self._hash = res
return self._hash
def concatenate(self, other):
r"""
Return the concatenation of ``self`` and ``other``.
INPUT:
- ``other`` -- a word over the same alphabet as ``self``
EXAMPLES:
Concatenation may be made using ``+`` or ``*`` operations::
sage: w = Word('abadafd')
sage: y = Word([5,3,5,8,7])
sage: w * y
word: abadafd53587
sage: w + y
word: abadafd53587
sage: w.concatenate(y)
word: abadafd53587
Both words must be defined over the same alphabet::
sage: z = Word('12223', alphabet = '123')
sage: z + y
Traceback (most recent call last):
...
ValueError: 5 not in alphabet!
Eventually, it should work::
sage: z = Word('12223', alphabet = '123')
sage: z + y #todo: not implemented
word: 1222353587
TESTS:
The empty word is not considered by concatenation::
sage: type(Word([]) * Word('abcd'))
<class 'sage.combinat.words.word.FiniteWord_str'>
sage: type(Word('abcd') * Word())
<class 'sage.combinat.words.word.FiniteWord_str'>
sage: type(Word('abcd') * Word([]))
<class 'sage.combinat.words.word.FiniteWord_str'>
sage: type(Word('abcd') * Word(()))
<class 'sage.combinat.words.word.FiniteWord_str'>
sage: type(Word([1,2,3]) * Word(''))
<class 'sage.combinat.words.word.FiniteWord_list'>
Concatenation of finite words with infinite words works as expected::
sage: from itertools import repeat
sage: W = Words('ab')
sage: w1 = W('aba')
sage: w2 = W(repeat('b'), length='infinite')
sage: w1*w2
word: ababbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb...
sage: _.parent()
Infinite words over {'a', 'b'}
"""
if self.is_empty():
return other
if isinstance(other, Word_class) and other.is_empty():
return self
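        # Build a lazy word that reads through ``self`` and then ``other``;
        # when the total length is infinite, the result is created in a
        # parent that accepts infinite words.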
f = CallableFromListOfWords([self,other])
length = self.length() + other.length()
parent = self._parent
if length == Infinity:
parent = parent.shift()
return parent(f, datatype='callable', caching=True)
else:
return parent(f, length=length, datatype='callable', caching=True)
__mul__ = concatenate
__add__ = concatenate
# TODO: This function is using domain=range(n) for Word but
# should be a domain=slice(n) # Seb : Feb 23th : I think this is fine now!!
def __pow__(self, exp):
r"""
Return the ``exp``-th power of ``self``.
If ``exp`` is `\infty`, returns the infinite periodic word of base ``self``.
Otherwise, `|w|\cdot exp` must be a non-negative integer.
INPUT:
- ``exp`` -- an integer, a rational, a float number or plus infinity
OUTPUT:
word -- the ``exp``-th power of ``self``
EXAMPLES:
You can take non-negative integer powers::
sage: w = Word(range(6)); w
word: 012345
sage: w^2
word: 012345012345
sage: w^1
word: 012345
sage: w^0
word:
sage: w^(-1)
Traceback (most recent call last):
...
ValueError: Power of the word is not defined on the exponent -1: the length of the word (6) times the exponent (-1) must be a positive integer
You can take non-negative rational powers::
sage: w = Word(range(6)); w
word: 012345
sage: w^(.5)
word: 012
sage: w^(1/3)
word: 01
sage: (w*w)^(1/2) == w
True
sage: w^(5/2)
word: 012345012345012
...but the length of the word times the exponent must be an integer::
sage: w = Word(range(6))
sage: w^(1/4)
Traceback (most recent call last):
...
ValueError: Power of the word is not defined on the exponent 1/4: the length of the word (6) times the exponent (1/4) must be a positive integer
You can take infinite power::
sage: w = Word(range(6)); w
word: 012345
sage: u = w^oo; u
word: 0123450123450123450123450123450123450123...
sage: u[10000000:20000000]
word: 4501234501234501234501234501234501234501...
sage: u[10000000:10000020]
word: 45012345012345012345
sage: Word()^oo
word:
"""
# powers of the empty word
if self.is_empty():
return self
# infinite power of a non-empty word
fcn = lambda n: self[n % self.length()]
if exp is Infinity:
return self._parent.shift()(fcn)
        # For a finite power, exp * |self| must be a non-negative integer
        length = exp * self.length()
if length in ZZ and length >= 0:
return self._parent(fcn, length=length)
else:
raise ValueError("Power of the word is not defined on the exponent {}:"
" the length of the word ({}) times the exponent ({}) must"
" be a positive integer".format(exp,self.length(),exp))
def length(self):
r"""
Return the length of ``self``.
TESTS::
sage: from sage.combinat.words.word import Word_class
sage: w = Word(iter('abba'*40), length="finite")
sage: w._len is None
True
sage: w.length()
160
sage: w = Word(iter('abba'), length=4)
sage: w._len
4
sage: w.length()
4
sage: def f(n):
....: return list(range(2,12,2))[n]
sage: w = Word(f, length=5)
sage: w.length()
5
"""
if self._len is None:
self._len = Integer(sum(1 for _ in self))
return self._len
def content(self, n=None):
r"""
Return content of ``self``.
INPUT:
- ``n`` -- (optional) an integer specifying the maximal
letter in the alphabet
OUTPUT:
        - a list where the `i`-th entry indicates the multiplicity
of the `i`-th letter in the alphabet in ``self``
EXAMPLES::
sage: w = Word([1,2,4,3,2,2,2])
sage: w.content()
[1, 4, 1, 1]
sage: w = Word([3,1])
sage: w.content()
[1, 1]
sage: w.content(n=3)
[1, 0, 1]
sage: w = Word([2,4],alphabet=[1,2,3,4])
sage: w.content(n=3)
[0, 1, 0]
sage: w.content()
[0, 1, 0, 1]
"""
if n is not None:
alphabet = range(1,n+1)
elif not self.parent().alphabet().cardinality() == +Infinity:
alphabet = self.parent().alphabet()
else:
alphabet = sorted(self.letters())
return [self.count(i) for i in alphabet]
def is_yamanouchi(self, n=None):
r"""
Return whether ``self`` is Yamanouchi.
A word `w` is Yamanouchi if, when read from right to left, it
always has weakly more `i`'s than `i+1`'s for all `i` that
appear in `w`.
INPUT:
- ``n`` -- (optional) an integer specifying the maximal
letter in the alphabet
EXAMPLES::
sage: w = Word([1,2,4,3,2,2,2])
sage: w.is_yamanouchi()
False
sage: w = Word([2,3,4,3,1,2,1,1,2,1])
sage: w.is_yamanouchi()
True
sage: w = Word([3,1])
sage: w.is_yamanouchi(n=3)
False
sage: w.is_yamanouchi()
True
sage: w = Word([3,1],alphabet=[1,2,3])
sage: w.is_yamanouchi()
False
sage: w = Word([2,1,1,2])
sage: w.is_yamanouchi()
False
"""
from sage.combinat.words.word import Word
if n is not None:
w = Word(self, alphabet=list(range(1,n+1)))
elif not self.parent().alphabet().cardinality() == +Infinity:
w = self
else:
w = Word(self, alphabet=sorted(self.letters()))
l = w.length()
for a in range(l-1,-1,-1):
mu = w.parent()(self[a:]).content()
if not all(mu[i] >= mu[i+1] for i in range(len(mu)-1)):
return False
return True
    def schuetzenberger_involution(self, n=None):
        r"""
        Return the Schützenberger involution of the word ``self``, which is
        obtained by reversing the word and then complementing all letters
        within the underlying ordered alphabet. If ``n`` is specified, the
        underlying alphabet is assumed to be `[1,2,\ldots,n]`. If no alphabet
        is specified, `n` is the maximal letter appearing in ``self``.
INPUT:
- ``self`` -- a word
- ``n`` -- an integer specifying the maximal letter in the alphabet (optional)
OUTPUT:
a word, the Schützenberger involution of ``self``
EXAMPLES::
sage: w = Word([9,7,4,1,6,2,3])
sage: v = w.schuetzenberger_involution(); v
word: 7849631
sage: v.parent()
Finite words over Set of Python objects of class 'object'
sage: w = Word([1,2,3],alphabet=[1,2,3,4,5])
sage: v = w.schuetzenberger_involution();v
word: 345
sage: v.parent()
Finite words over {1, 2, 3, 4, 5}
sage: w = Word([1,2,3])
sage: v = w.schuetzenberger_involution(n=5);v
word: 345
sage: v.parent()
Finite words over Set of Python objects of class 'object'
sage: w = Word([11,32,69,2,53,1,2,3,18,41])
sage: w.schuetzenberger_involution()
word: 29,52,67,68,69,17,68,1,38,59
sage: w = Word([],alphabet=[1,2,3,4,5])
sage: w.schuetzenberger_involution()
word:
sage: w = Word([])
sage: w.schuetzenberger_involution()
word:
"""
if self.length() == 0:
return self
r = self.reversal()
w = list(r)
parent = self.parent()
if n is None:
alphsize = parent.alphabet().cardinality()
if not alphsize == +Infinity:
n = max(parent.alphabet())
elif r.length()>0:
n = max(w)
for k in range(r.length()):
w[k] = n+1 - w[k]
return parent(w, check=False)
def foata_bijection(self):
r"""
Return word ``self`` under the Foata bijection.
The Foata bijection `\phi` is a bijection on the set of words
of given content (by a slight generalization of Section 2 in [FoSc78]_).
It can be defined by induction on the size of the word: Given a word
`w_1 w_2 \cdots w_n`, start with `\phi(w_1) = w_1`. At the `i`-th step, if
`\phi(w_1 w_2 \cdots w_i) = v_1 v_2 \cdots v_i`, we define
`\phi(w_1 w_2 \cdots w_i w_{i+1})` by placing `w_{i+1}` on the end of
the word `v_1 v_2 \cdots v_i` and breaking the word up into blocks
as follows. If `w_{i+1} \ge v_i`, place a vertical line to the right
of each `v_k` for which `w_{i+1} \ge v_k`. Otherwise, if
`w_{i+1} < v_i`, place a vertical line to the right of each `v_k`
for which `w_{i+1} < v_k`. In either case, place a vertical line at
the start of the word as well. Now, within each block between
vertical lines, cyclically shift the entries one place to the
right.
For instance, to compute `\phi([4,1,5,4,2,2,3])`, the sequence of
words is
* `4`,
* `|4|1 \to 41`,
* `|4|1|5 \to 415`,
* `|415|4 \to 5414`,
* `|5|4|14|2 \to 54412`,
* `|5441|2|2 \to 154422`,
* `|1|5442|2|3 \to 1254423`.
So `\phi([4,1,5,4,2,2,3]) = [1,2,5,4,4,2,3]`.
.. SEEALSO::
:meth:`Foata bijection on Permutations <sage.combinat.permutation.Permutation.foata_bijection()>`.
EXAMPLES::
sage: w = Word([2,2,2,1,1,1])
sage: w.foata_bijection()
word: 112221
sage: w = Word([2,2,1,2,2,2,1,1,2,1])
sage: w.foata_bijection()
word: 2122212211
sage: w = Word([4,1,5,4,2,2,3])
sage: w.foata_bijection()
word: 1254423
TESTS::
sage: w = Word('121314')
sage: w.foata_bijection()
word: 231114
sage: w = Word('1133a1')
sage: w.foata_bijection()
word: 3113a1
"""
s = self.standard_permutation()
ordered_alphabet = sorted(self.letters(),
key=self.parent().sortkey_letters)
eval_dict = self.evaluation_dict()
weight = [eval_dict[a] for a in ordered_alphabet]
return (s.foata_bijection()).destandardize(weight, ordered_alphabet=ordered_alphabet)
def major_index(self, final_descent=False):
r"""
Return the major index of ``self``.
The major index of a word `w` is the sum of the descents of `w`.
With the ``final_descent`` option, the last position of a
non-empty word is also considered as a descent.
.. SEEALSO::
:meth:`major index on Permutations <sage.combinat.permutation.Permutation.major_index()>`.
EXAMPLES::
sage: w = Word([2,1,3,3,2])
sage: w.major_index()
5
sage: w = Word([2,1,3,3,2])
sage: w.major_index(final_descent=True)
10
"""
return (self.standard_permutation()).major_index(final_descent=final_descent)
def number_of_inversions(self):
r"""
Return the number of inversions in ``self``.
An inversion of a word `w = w_1 \ldots w_n` is a pair of indices `(i, j)`
with `i < j` and `w_i > w_j`.
.. SEEALSO::
:meth:`number of inversions on Permutations <sage.combinat.permutation.Permutation.number_of_inversions()>`.
EXAMPLES::
sage: w = Word([2,1,3,3,2])
sage: w.number_of_inversions()
3
"""
return (self.standard_permutation()).number_of_inversions()
def is_empty(self):
r"""
Return ``True`` if the length of ``self`` is zero, and ``False`` otherwise.
EXAMPLES::
sage: Word([]).is_empty()
True
sage: Word('a').is_empty()
False
"""
return self.length()==0
def is_finite(self):
r"""
Return ``True``.
EXAMPLES::
sage: Word([]).is_finite()
True
sage: Word('a').is_finite()
True
"""
return True
def to_integer_word(self):
r"""
        Return a word over the integers ``0, 1, ..., k-1``, where `k` is the
        number of distinct letters of ``self``, whose letters appear in the
        same relative order as in the parent alphabet.
EXAMPLES::
sage: from itertools import count
sage: w = Word('abbabaab')
sage: w.to_integer_word()
word: 01101001
sage: w = Word(iter("cacao"), length="finite")
sage: w.to_integer_word()
word: 10102
sage: w = Words([3,2,1])([2,3,3,1])
sage: w.to_integer_word()
word: 1002
"""
from sage.combinat.words.word import Word
return Word(self.to_integer_list())
def to_integer_list(self):
r"""
        Return a list of integers from ``0, 1, ..., k-1``, where `k` is the
        number of distinct letters of ``self``, in the same relative order as
        the letters of ``self`` in the parent alphabet.
EXAMPLES::
sage: from itertools import count
sage: w = Word('abbabaab')
sage: w.to_integer_list()
[0, 1, 1, 0, 1, 0, 0, 1]
sage: w = Word(iter("cacao"), length="finite")
sage: w.to_integer_list()
[1, 0, 1, 0, 2]
sage: w = Words([3,2,1])([2,3,3,1])
sage: w.to_integer_list()
[1, 0, 0, 2]
"""
cmp_key = self._parent.sortkey_letters
ordered_alphabet = sorted(self.letters(), key=cmp_key)
index = dict((b,a) for (a,b) in enumerate(ordered_alphabet))
return [index[a] for a in self]
# To fix : do not slice here ! (quite expensive in copy)
def is_suffix(self, other):
r"""
Return ``True`` if ``self`` is a suffix of ``other``, and ``False`` otherwise.
EXAMPLES::
sage: w = Word('0123456789')
sage: y = Word('56789')
sage: y.is_suffix(w)
True
sage: w.is_suffix(y)
False
sage: Word('579').is_suffix(w)
False
sage: Word().is_suffix(y)
True
sage: w.is_suffix(Word())
False
sage: Word().is_suffix(Word())
True
"""
return self.is_empty() or self == other[-self.length():]
def is_proper_suffix(self, other):
r"""
Return ``True`` if ``self`` is a proper suffix of ``other``, and ``False`` otherwise.
EXAMPLES::
sage: Word('23').is_proper_suffix(Word('123'))
True
sage: Word('12').is_proper_suffix(Word('12'))
False
sage: Word().is_proper_suffix(Word('123'))
True
sage: Word('123').is_proper_suffix(Word('12'))
False
"""
return self.is_suffix(other) and self.length() < other.length()
def has_suffix(self, other):
"""
Test whether ``self`` has ``other`` as a suffix.
.. note::
Some word datatype classes, like :class:`WordDatatype_str`,
override this method.
INPUT:
- ``other`` -- a word, or data describing a word
OUTPUT:
boolean
EXAMPLES::
sage: w = Word("abbabaabababa")
sage: u = Word("ababa")
sage: w.has_suffix(u)
True
sage: u.has_suffix(w)
False
sage: u.has_suffix("ababa")
True
::
sage: w = Word([0,1,1,0,1,0,0,1,0,1,0,1,0])
sage: u = Word([0,1,0,1,0])
sage: w.has_suffix(u)
True
sage: u.has_suffix(w)
False
sage: u.has_suffix([0,1,0,1,0])
True
"""
from sage.combinat.words.word import Word
w = Word(other)
return w.is_suffix(self)
def is_prefix(self, other):
r"""
Return ``True`` if ``self`` is a prefix of ``other``, and ``False`` otherwise.
EXAMPLES::
sage: w = Word('0123456789')
sage: y = Word('012345')
sage: y.is_prefix(w)
True
sage: w.is_prefix(y)
False
sage: w.is_prefix(Word())
False
sage: Word().is_prefix(w)
True
sage: Word().is_prefix(Word())
True
"""
return self == other[:self.length()]
def is_proper_prefix(self, other):
r"""
Return ``True`` if ``self`` is a proper prefix of ``other``, and ``False`` otherwise.
EXAMPLES::
sage: Word('12').is_proper_prefix(Word('123'))
True
sage: Word('12').is_proper_prefix(Word('12'))
False
sage: Word().is_proper_prefix(Word('123'))
True
sage: Word('123').is_proper_prefix(Word('12'))
False
sage: Word().is_proper_prefix(Word())
False
"""
return self.is_prefix(other) and self.length() < other.length()
def has_prefix(self, other):
r"""
Test whether ``self`` has ``other`` as a prefix.
INPUT:
- ``other`` -- a word, or data describing a word
OUTPUT:
boolean
EXAMPLES::
sage: w = Word("abbabaabababa")
sage: u = Word("abbab")
sage: w.has_prefix(u)
True
sage: u.has_prefix(w)
False
sage: u.has_prefix("abbab")
True
::
sage: w = Word([0,1,1,0,1,0,0,1,0,1,0,1,0])
sage: u = Word([0,1,1,0,1])
sage: w.has_prefix(u)
True
sage: u.has_prefix(w)
False
sage: u.has_prefix([0,1,1,0,1])
True
"""
from sage.combinat.words.word import Word
w = Word(other)
return w.is_prefix(self)
def reversal(self):
r"""
Return the reversal of ``self``.
EXAMPLES::
sage: Word('124563').reversal()
word: 365421
"""
return self[::-1]
@cached_method
def prefix_function_table(self):
r"""
Return a vector containing the length of the proper prefix-suffixes
for all the non-empty prefixes of ``self``.
EXAMPLES::
sage: Word('121321').prefix_function_table()
[0, 0, 1, 0, 0, 1]
sage: Word('1241245').prefix_function_table()
[0, 0, 0, 1, 2, 3, 0]
sage: Word().prefix_function_table()
[]
"""
k = 0
res = [0]*self.length()
for q in range(1, self.length()):
while k > 0 and self[k] != self[q]:
k = res[k-1]
if self[k] == self[q]:
k += 1
res[q] = k
return res
@cached_method
def good_suffix_table(self):
r"""
Return a table of the maximum skip you can do in order not to miss
a possible occurrence of ``self`` in a word.
This is a part of the Boyer-Moore algorithm to find factors. See [1].
EXAMPLES::
sage: Word('121321').good_suffix_table()
[5, 5, 5, 5, 3, 3, 1]
sage: Word('12412').good_suffix_table()
[3, 3, 3, 3, 3, 1]
REFERENCES:
- [1] R.S. Boyer, J.S. Moore, A fast string searching algorithm,
Communications of the ACM 20 (1977) 762--772.
"""
l = self.length()
p = self.reversal().prefix_function_table()
res = [l - p[-1]]*(l+1)
for i in range(1, l+1):
j = l - p[i - 1]
if res[j] > (i - p[i-1]):
res[j] = i - p[i-1]
return res
@cached_method
def suffix_trie(self):
r"""
Return the suffix trie of ``self``.
The *suffix trie* of a finite word `w` is a data structure
representing the factors of `w`. It is a tree whose edges are
        labelled with letters of `w`, and whose leaves correspond to
suffixes of `w`.
Type ``sage.combinat.words.suffix_trees.SuffixTrie?`` for more information.
EXAMPLES::
sage: w = Word("cacao")
sage: w.suffix_trie()
Suffix Trie of the word: cacao
::
sage: w = Word([0,1,0,1,1])
sage: w.suffix_trie()
Suffix Trie of the word: 01011
"""
from sage.combinat.words.suffix_trees import SuffixTrie
return SuffixTrie(self)
def implicit_suffix_tree(self):
r"""
Return the implicit suffix tree of ``self``.
The *suffix tree* of a word `w` is a compactification of the
suffix trie for `w`. The compactification removes all nodes that have
exactly one incoming edge and exactly one outgoing edge. It consists of
two components: a tree and a word. Thus, instead of labelling the edges
by factors of `w`, we can label them by indices of the occurrence of
the factors in `w`.
Type ``sage.combinat.words.suffix_trees.ImplicitSuffixTree?`` for more information.
EXAMPLES::
sage: w = Word("cacao")
sage: w.implicit_suffix_tree()
Implicit Suffix Tree of the word: cacao
::
sage: w = Word([0,1,0,1,1])
sage: w.implicit_suffix_tree()
Implicit Suffix Tree of the word: 01011
"""
from sage.combinat.words.suffix_trees import ImplicitSuffixTree
return ImplicitSuffixTree(self)
@cached_method
def suffix_tree(self):
r"""
Alias for ``implicit_suffix_tree()``.
EXAMPLES::
sage: Word('abbabaab').suffix_tree()
Implicit Suffix Tree of the word: abbabaab
"""
return self.implicit_suffix_tree()
def number_of_factors(self, n=None, algorithm='suffix tree'):
r"""
Count the number of distinct factors of ``self``.
INPUT:
- ``n`` -- an integer, or ``None``.
- ``algorithm`` -- string (default: ``'suffix tree'``), takes the
following values:
- ``'suffix tree'`` -- construct and use the suffix tree of the word
- ``'naive'`` -- algorithm uses a sliding window
OUTPUT:
If ``n`` is an integer, returns the number of distinct factors
of length ``n``. If ``n`` is ``None``, returns the total number of
distinct factors.
EXAMPLES::
sage: w = Word([1,2,1,2,3])
sage: w.number_of_factors()
13
sage: [w.number_of_factors(i) for i in range(6)]
[1, 3, 3, 3, 2, 1]
::
sage: w = words.ThueMorseWord()[:100]
sage: [w.number_of_factors(i) for i in range(10)]
[1, 2, 4, 6, 10, 12, 16, 20, 22, 24]
::
sage: Word('1213121').number_of_factors()
22
sage: Word('1213121').number_of_factors(1)
3
::
sage: Word('a'*100).number_of_factors()
101
sage: Word('a'*100).number_of_factors(77)
1
::
sage: Word().number_of_factors()
1
sage: Word().number_of_factors(17)
0
::
sage: blueberry = Word("blueberry")
sage: blueberry.number_of_factors()
43
sage: [blueberry.number_of_factors(i) for i in range(10)]
[1, 6, 8, 7, 6, 5, 4, 3, 2, 1]
"""
if algorithm == 'suffix tree':
return self.suffix_tree().number_of_factors(n)
elif algorithm == 'naive':
return len(self.factor_set(n, algorithm='naive'))
else:
raise ValueError('Unknown algorithm (={})'.format(algorithm))
def factor_iterator(self, n=None):
r"""
Generate distinct factors of ``self``.
INPUT:
- ``n`` -- an integer, or ``None``.
OUTPUT:
If ``n`` is an integer, returns an iterator over all distinct
factors of length ``n``. If ``n`` is ``None``, returns an iterator
generating all distinct factors.
EXAMPLES::
sage: w = Word('1213121')
sage: sorted( w.factor_iterator(0) )
[word: ]
sage: sorted( w.factor_iterator(10) )
[]
sage: sorted( w.factor_iterator(1) )
[word: 1, word: 2, word: 3]
sage: sorted( w.factor_iterator(4) )
[word: 1213, word: 1312, word: 2131, word: 3121]
sage: sorted( w.factor_iterator() )
[word: , word: 1, word: 12, word: 121, word: 1213, word: 12131, word: 121312, word: 1213121, word: 13, word: 131, word: 1312, word: 13121, word: 2, word: 21, word: 213, word: 2131, word: 21312, word: 213121, word: 3, word: 31, word: 312, word: 3121]
::
sage: u = Word([1,2,1,2,3])
sage: sorted( u.factor_iterator(0) )
[word: ]
sage: sorted( u.factor_iterator(10) )
[]
sage: sorted( u.factor_iterator(1) )
[word: 1, word: 2, word: 3]
sage: sorted( u.factor_iterator(5) )
[word: 12123]
sage: sorted( u.factor_iterator() )
[word: , word: 1, word: 12, word: 121, word: 1212, word: 12123, word: 123, word: 2, word: 21, word: 212, word: 2123, word: 23, word: 3]
::
sage: xxx = Word("xxx")
sage: sorted( xxx.factor_iterator(0) )
[word: ]
sage: sorted( xxx.factor_iterator(4) )
[]
sage: sorted( xxx.factor_iterator(2) )
[word: xx]
sage: sorted( xxx.factor_iterator() )
[word: , word: x, word: xx, word: xxx]
::
sage: e = Word()
sage: sorted( e.factor_iterator(0) )
[word: ]
sage: sorted( e.factor_iterator(17) )
[]
sage: sorted( e.factor_iterator() )
[word: ]
TESTS::
sage: type( Word('cacao').factor_iterator() )
<... 'generator'>
"""
return self.suffix_tree().factor_iterator(n)
def factor_set(self, n=None, algorithm='suffix tree'):
r"""
Return the set of factors (of length ``n``) of ``self``.
INPUT:
- ``n`` -- an integer or ``None`` (default: ``None``).
- ``algorithm`` -- string (default: ``'suffix tree'``), takes the
following values:
- ``'suffix tree'`` -- construct and use the suffix tree of the word
- ``'naive'`` -- algorithm uses a sliding window
OUTPUT:
If ``n`` is an integer, returns the set of all distinct
factors of length ``n``. If ``n`` is ``None``, returns the set
of all distinct factors.
EXAMPLES::
sage: w = Word('121')
sage: sorted(w.factor_set())
[word: , word: 1, word: 12, word: 121, word: 2, word: 21]
sage: sorted(w.factor_set(algorithm='naive'))
[word: , word: 1, word: 12, word: 121, word: 2, word: 21]
::
sage: w = Word('1213121')
sage: for i in range(w.length()): sorted(w.factor_set(i))
[word: ]
[word: 1, word: 2, word: 3]
[word: 12, word: 13, word: 21, word: 31]
[word: 121, word: 131, word: 213, word: 312]
[word: 1213, word: 1312, word: 2131, word: 3121]
[word: 12131, word: 13121, word: 21312]
[word: 121312, word: 213121]
::
sage: w = Word([1,2,1,2,3])
sage: s = w.factor_set()
sage: sorted(s)
[word: , word: 1, word: 12, word: 121, word: 1212, word: 12123, word: 123, word: 2, word: 21, word: 212, word: 2123, word: 23, word: 3]
TESTS::
sage: w = Word("xx")
sage: s = w.factor_set()
sage: sorted(s)
[word: , word: x, word: xx]
::
sage: Set(Word().factor_set())
{word: }
::
sage: w = Word(range(10), alphabet=range(10))
sage: S1 = w.factor_set(3, algorithm='suffix tree')
sage: S2 = w.factor_set(3, algorithm='naive')
sage: S1 == S2
True
"""
if algorithm == 'suffix tree':
return Set(self.factor_iterator(n))
elif algorithm == 'naive':
if n is None:
S = set([self[0:0]])
for n in range(1, self.length()+1):
for i in range(self.length()-n+1):
S.add(self[i:i+n])
return Set(S)
else:
S = set()
for i in range(self.length()-n+1):
S.add(self[i:i+n])
return Set(S)
else:
raise ValueError('Unknown algorithm (={})'.format(algorithm))
def topological_entropy(self, n):
r"""
Return the topological entropy for the factors of length ``n``.
The topological entropy of a sequence `u` is defined as the
exponential growth rate of the complexity of `u` as the length
increases: `H_{top}(u)=\lim_{n\to\infty}\frac{\log_d(p_u(n))}{n}`
where `d` denotes the cardinality of the alphabet and `p_u(n)` is
the complexity function, i.e. the number of factors of length `n`
in the sequence `u` [1].
INPUT:
- ``self`` -- a word defined over a finite alphabet
- ``n`` -- positive integer
OUTPUT:
real number (a symbolic expression)
EXAMPLES::
sage: W = Words([0, 1])
sage: w = W([0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1])
sage: t = w.topological_entropy(3); t
1/3*log(7)/log(2)
sage: n(t)
0.935784974019201
::
sage: w = words.ThueMorseWord()[:100]
sage: topo = w.topological_entropy
sage: for i in range(0, 41, 5):
....: print("{} {}".format(i, n(topo(i), digits=5)))
0 1.0000
5 0.71699
10 0.48074
15 0.36396
20 0.28774
25 0.23628
30 0.20075
35 0.17270
40 0.14827
If no alphabet is specified, an error is raised::
sage: w = Word(range(20))
sage: w.topological_entropy(3)
Traceback (most recent call last):
...
TypeError: The word must be defined over a finite alphabet
The following is ok::
sage: W = Words(range(20))
sage: w = W(range(20))
sage: w.topological_entropy(3)
1/3*log(18)/log(20)
REFERENCES:
[1] N. Pytheas Fogg, Substitutions in Dynamics, Arithmetics,
and Combinatorics, Lecture Notes in Mathematics 1794, Springer
Verlag. V. Berthe, S. Ferenczi, C. Mauduit and A. Siegel, Eds.
(2002).
"""
d = self.parent().alphabet().cardinality()
if d is Infinity:
raise TypeError("The word must be defined over a finite alphabet")
if n == 0:
return 1
pn = self.number_of_factors(n)
from sage.functions.all import log
return log(pn, base=d)/n
def rauzy_graph(self, n):
r"""
Return the Rauzy graph of the factors of length ``n`` of ``self``.
The vertices are the factors of length `n` and there is an edge from
`u` to `v` if `ua = bv` is a factor of length `n+1` for some letters
`a` and `b`.
INPUT:
- ``n`` -- integer
EXAMPLES::
sage: w = Word(range(10)); w
word: 0123456789
sage: g = w.rauzy_graph(3); g
Looped digraph on 8 vertices
sage: WordOptions(identifier='')
sage: g.vertices()
[012, 123, 234, 345, 456, 567, 678, 789]
sage: g.edges()
[(012, 123, 3),
(123, 234, 4),
(234, 345, 5),
(345, 456, 6),
(456, 567, 7),
(567, 678, 8),
(678, 789, 9)]
sage: WordOptions(identifier='word: ')
::
sage: f = words.FibonacciWord()[:100]
sage: f.rauzy_graph(8)
Looped digraph on 9 vertices
::
sage: w = Word('1111111')
sage: g = w.rauzy_graph(3)
sage: g.edges()
[(word: 111, word: 111, word: 1)]
::
sage: w = Word('111')
sage: for i in range(5) : w.rauzy_graph(i)
Looped multi-digraph on 1 vertex
Looped digraph on 1 vertex
Looped digraph on 1 vertex
Looped digraph on 1 vertex
Looped digraph on 0 vertices
Multi-edges are allowed for the empty word::
sage: W = Words('abcde')
sage: w = W('abc')
sage: w.rauzy_graph(0)
Looped multi-digraph on 1 vertex
sage: _.edges()
[(word: , word: , word: a),
(word: , word: , word: b),
(word: , word: , word: c)]
"""
from sage.graphs.digraph import DiGraph
multiedges = n == 0
g = DiGraph(loops=True, multiedges=multiedges)
if n == self.length():
g.add_vertex(self)
else:
for w in self.factor_iterator(n+1):
u = w[:-1]
v = w[1:]
a = w[-1:]
g.add_edge(u,v,a)
return g
def reduced_rauzy_graph(self, n):
r"""
Return the reduced Rauzy graph of order ``n`` of ``self``.
INPUT:
- ``n`` -- a non-negative integer. Every vertex of a reduced
Rauzy graph of order ``n`` is a factor of length ``n`` of ``self``.
OUTPUT:
a looped multi-digraph
DEFINITION:
        For infinite periodic words (resp. for finite words of type `u^i
        u[0:j]`), the reduced Rauzy graph of order `n` (resp. for `n`
        less than or equal to `(i-1)|u|+j`) is the directed graph whose
        unique vertex is the prefix `p` of length `n` of ``self`` and whose
        only edge is a loop on `p` labelled by `w[n+1:|w|] p`, where `w`
        is the unique return word to `p`.
        In the other cases, it is the directed graph defined as follows. Let
        `G_n` be the Rauzy graph of order `n` of ``self``. The vertices are
        the vertices of `G_n` that are either special or not prolongable to
        the right or to the left. For each pair (`u`, `v`) of such vertices
        and each directed path in `G_n` from `u` to `v` that contains no
        other special vertices, there is an edge from `u` to `v` in the
        reduced Rauzy graph of order `n` whose label is the label of the
        path in `G_n`.
.. NOTE::
In the case of infinite recurrent non-periodic words, this
definition corresponds to the following one that can be found in
[1] and [2] where a simple path is a path that begins with a
special factor, ends with a special factor and contains no
other vertices that are special:
The reduced Rauzy graph of factors of length `n` is obtained
from `G_n` by replacing each simple path `P=v_1 v_2 ...
v_{\ell}` with an edge `v_1 v_{\ell}` whose label is the
concatenation of the labels of the edges of `P`.
EXAMPLES::
sage: w = Word(range(10)); w
word: 0123456789
sage: g = w.reduced_rauzy_graph(3); g
Looped multi-digraph on 2 vertices
sage: g.vertices()
[word: 012, word: 789]
sage: g.edges()
[(word: 012, word: 789, word: 3456789)]
For the Fibonacci word::
sage: f = words.FibonacciWord()[:100]
sage: g = f.reduced_rauzy_graph(8);g
Looped multi-digraph on 2 vertices
sage: g.vertices()
[word: 01001010, word: 01010010]
sage: g.edges()
[(word: 01001010, word: 01010010, word: 010), (word: 01010010, word: 01001010, word: 01010), (word: 01010010, word: 01001010, word: 10)]
For periodic words::
sage: from itertools import cycle
sage: w = Word(cycle('abcd'))[:100]
sage: g = w.reduced_rauzy_graph(3)
sage: g.edges()
[(word: abc, word: abc, word: dabc)]
::
sage: w = Word('111')
sage: for i in range(5) : w.reduced_rauzy_graph(i)
Looped digraph on 1 vertex
Looped digraph on 1 vertex
Looped digraph on 1 vertex
Looped multi-digraph on 1 vertex
Looped multi-digraph on 0 vertices
For ultimately periodic words::
sage: sigma = WordMorphism('a->abcd,b->cd,c->cd,d->cd')
sage: w = sigma.fixed_point('a')[:100]; w
word: abcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcdcd...
sage: g = w.reduced_rauzy_graph(5)
sage: g.vertices()
[word: abcdc, word: cdcdc]
sage: g.edges()
[(word: abcdc, word: cdcdc, word: dc), (word: cdcdc, word: cdcdc, word: dc)]
AUTHOR:
Julien Leroy (March 2010): initial version
REFERENCES:
        - [1] M. Bucci, A. De Luca, A. Glen, L. Q. Zamboni, A connection
          between palindromic and factor complexity using return words,
          Advances in Applied Mathematics 42 (2009) 60-74.
- [2] L'ubomira Balkova, Edita Pelantova, and Wolfgang Steiner.
Sequences with constant number of return words. Monatsh. Math,
155 (2008) 251-263.
"""
from sage.graphs.digraph import DiGraph
from copy import copy
g = copy(self.rauzy_graph(n))
# Otherwise it changes the rauzy_graph function.
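        # Vertices with in-degree and out-degree both equal to one are
        # neither special nor blocked for prolongation; they are contracted
        # below.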
l = [v for v in g if g.in_degree(v)==1 and g.out_degree(v)==1]
if g.num_verts() != 0 and len(l) == g.num_verts():
# In this case, the Rauzy graph is simply a cycle.
g = DiGraph()
g.allow_loops(True)
g.add_vertex(self[:n])
g.add_edge(self[:n],self[:n],self[n:n+len(l)])
else:
g.allow_loops(True)
g.allow_multiple_edges(True)
for v in l:
[i] = g.neighbors_in(v)
[o] = g.neighbors_out(v)
g.add_edge(i,o,g.edge_label(i,v)[0]*g.edge_label(v,o)[0])
g.delete_vertex(v)
return g
def left_special_factors_iterator(self, n=None):
r"""
Return an iterator over the left special factors (of length ``n``).
A factor `u` of a word `w` is *left special* if there are
two distinct letters `a` and `b` such that `au` and `bu`
are factors of `w`.
INPUT:
- ``n`` -- integer (optional, default: ``None``). If ``None``, it returns
an iterator over all left special factors.
EXAMPLES::
sage: alpha, beta, x = 0.54, 0.294, 0.1415
sage: w = words.CodingOfRotationWord(alpha, beta, x)[:40]
sage: sorted(w.left_special_factors_iterator(3))
[word: 000, word: 010]
sage: sorted(w.left_special_factors_iterator(4))
[word: 0000, word: 0101]
sage: sorted(w.left_special_factors_iterator(5))
[word: 00000, word: 01010]
"""
if n is None:
for i in range(self.length()):
for w in self.left_special_factors_iterator(i):
yield w
else:
left_extensions = defaultdict(set)
for w in self.factor_iterator(n+1):
v = w[1:]
left_extensions[v].add(w[0])
for v in left_extensions:
if len(left_extensions[v]) > 1:
yield v
def left_special_factors(self, n=None):
r"""
Return the left special factors (of length ``n``).
A factor `u` of a word `w` is *left special* if there are
two distinct letters `a` and `b` such that `au` and `bu`
are factors of `w`.
INPUT:
- ``n`` -- integer (optional, default: ``None``). If ``None``, it
returns all left special factors.
OUTPUT:
a list of words
EXAMPLES::
sage: alpha, beta, x = 0.54, 0.294, 0.1415
sage: w = words.CodingOfRotationWord(alpha, beta, x)[:40]
sage: for i in range(5):
....: print("{} {}".format(i, sorted(w.left_special_factors(i))))
0 [word: ]
1 [word: 0]
2 [word: 00, word: 01]
3 [word: 000, word: 010]
4 [word: 0000, word: 0101]
"""
return list(self.left_special_factors_iterator(n))
def right_special_factors_iterator(self, n=None):
r"""
Return an iterator over the right special factors (of length ``n``).
A factor `u` of a word `w` is *right special* if there are
two distinct letters `a` and `b` such that `ua` and `ub`
are factors of `w`.
INPUT:
- ``n`` -- integer (optional, default: ``None``). If ``None``, it returns
an iterator over all right special factors.
EXAMPLES::
sage: alpha, beta, x = 0.61, 0.54, 0.3
sage: w = words.CodingOfRotationWord(alpha, beta, x)[:40]
sage: sorted(w.right_special_factors_iterator(3))
[word: 010, word: 101]
sage: sorted(w.right_special_factors_iterator(4))
[word: 0101, word: 1010]
sage: sorted(w.right_special_factors_iterator(5))
[word: 00101, word: 11010]
"""
if n is None:
for i in range(self.length()):
for w in self.right_special_factors_iterator(i):
yield w
else:
right_extensions = defaultdict(set)
for w in self.factor_iterator(n+1):
v = w[:-1]
right_extensions[v].add(w[-1])
for v in right_extensions:
if len(right_extensions[v]) > 1:
yield v
def right_special_factors(self, n=None):
r"""
Return the right special factors (of length ``n``).
A factor `u` of a word `w` is *right special* if there are
two distinct letters `a` and `b` such that `ua` and `ub`
are factors of `w`.
INPUT:
- ``n`` -- integer (optional, default: ``None``). If ``None``, it returns
all right special factors.
OUTPUT:
a list of words
EXAMPLES::
sage: w = words.ThueMorseWord()[:30]
sage: for i in range(5):
....: print("{} {}".format(i, sorted(w.right_special_factors(i))))
0 [word: ]
1 [word: 0, word: 1]
2 [word: 01, word: 10]
3 [word: 001, word: 010, word: 101, word: 110]
4 [word: 0110, word: 1001]
"""
return list(self.right_special_factors_iterator(n))
def bispecial_factors_iterator(self, n=None):
r"""
Return an iterator over the bispecial factors (of length ``n``).
A factor `u` of a word `w` is *bispecial* if it is right special
and left special.
INPUT:
- ``n`` -- integer (optional, default: ``None``). If ``None``, it returns
an iterator over all bispecial factors.
EXAMPLES::
sage: w = words.ThueMorseWord()[:30]
sage: for i in range(10):
....: for u in sorted(w.bispecial_factors_iterator(i)):
....: print("{} {}".format(i,u))
0
1 0
1 1
2 01
2 10
3 010
3 101
4 0110
4 1001
6 011001
6 100110
8 10010110
::
sage: key = lambda u : (len(u), u)
sage: for u in sorted(w.bispecial_factors_iterator(), key=key): u
word:
word: 0
word: 1
word: 01
word: 10
word: 010
word: 101
word: 0110
word: 1001
word: 011001
word: 100110
word: 10010110
"""
if n is None:
for i in range(self.length()):
for w in self.bispecial_factors_iterator(i):
yield w
else:
left_extensions = defaultdict(set)
right_extensions = defaultdict(set)
for w in self.factor_iterator(n+2):
v = w[1:-1]
left_extensions[v].add(w[0])
right_extensions[v].add(w[-1])
for v in left_extensions:
if (len(left_extensions[v]) > 1 and
len(right_extensions[v]) > 1):
yield v
def bispecial_factors(self, n=None):
r"""
Return the bispecial factors (of length ``n``).
A factor `u` of a word `w` is *bispecial* if it is right special
and left special.
INPUT:
- ``n`` -- integer (optional, default: ``None``). If ``None``, it returns
all bispecial factors.
OUTPUT:
a list of words
EXAMPLES::
sage: w = words.FibonacciWord()[:30]
sage: w.bispecial_factors()
[word: , word: 0, word: 010, word: 010010, word: 01001010010]
::
sage: w = words.ThueMorseWord()[:30]
sage: for i in range(10):
....: print("{} {}".format(i, sorted(w.bispecial_factors(i))))
0 [word: ]
1 [word: 0, word: 1]
2 [word: 01, word: 10]
3 [word: 010, word: 101]
4 [word: 0110, word: 1001]
5 []
6 [word: 011001, word: 100110]
7 []
8 [word: 10010110]
9 []
"""
return list(self.bispecial_factors_iterator(n))
def number_of_left_special_factors(self, n):
r"""
Return the number of left special factors of length ``n``.
A factor `u` of a word `w` is *left special* if there are
two distinct letters `a` and `b` such that `au` and `bu`
are factors of `w`.
INPUT:
- ``n`` -- integer
OUTPUT:
a non-negative integer
EXAMPLES::
sage: w = words.FibonacciWord()[:100]
sage: [w.number_of_left_special_factors(i) for i in range(10)]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
::
sage: w = words.ThueMorseWord()[:100]
sage: [w.number_of_left_special_factors(i) for i in range(10)]
[1, 2, 2, 4, 2, 4, 4, 2, 2, 4]
"""
it = self.left_special_factors_iterator(n)
return sum(1 for _ in it)
def number_of_right_special_factors(self, n):
r"""
Return the number of right special factors of length ``n``.
A factor `u` of a word `w` is *right special* if there are
two distinct letters `a` and `b` such that `ua` and `ub`
are factors of `w`.
INPUT:
- ``n`` -- integer
OUTPUT:
a non-negative integer
EXAMPLES::
sage: w = words.FibonacciWord()[:100]
sage: [w.number_of_right_special_factors(i) for i in range(10)]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
::
sage: w = words.ThueMorseWord()[:100]
sage: [w.number_of_right_special_factors(i) for i in range(10)]
[1, 2, 2, 4, 2, 4, 4, 2, 2, 4]
"""
it = self.right_special_factors_iterator(n)
return sum(1 for _ in it)
def commutes_with(self, other):
r"""
Return ``True`` if ``self`` commutes with ``other``, and ``False`` otherwise.
EXAMPLES::
sage: Word('12').commutes_with(Word('12'))
True
sage: Word('12').commutes_with(Word('11'))
False
sage: Word().commutes_with(Word('21'))
True
"""
return (self * other) == (other * self)
def conjugate(self, pos):
r"""
Return the conjugate at ``pos`` of ``self``.
``pos`` can be any integer, the distance used is the modulo by the length
of ``self``.
EXAMPLES::
sage: Word('12112').conjugate(1)
word: 21121
sage: Word().conjugate(2)
word:
sage: Word('12112').conjugate(8)
word: 12121
sage: Word('12112').conjugate(-1)
word: 21211
"""
if self.is_empty():
return self
pos_mod = pos % self.length()
return self[pos_mod:] * self[:pos_mod]
def _conjugates_list(self):
r"""
Return the list of conjugates of ``self``, ordered from the `0`-th to the
`(L-1)`-st conjugate, where `L` is the length of ``self``.
TESTS::
sage: Word('cbbca')._conjugates_list()
[word: cbbca, word: bbcac, word: bcacb, word: cacbb, word: acbbc]
sage: Word('abcabc')._conjugates_list()
[word: abcabc,
word: bcabca,
word: cabcab,
word: abcabc,
word: bcabca,
word: cabcab]
sage: Word()._conjugates_list()
[word: ]
sage: Word('a')._conjugates_list()
[word: a]
"""
S = [self]
for i in range(1,self.length()):
S.append(self.conjugate(i))
return S
def conjugates_iterator(self):
r"""
Return an iterator over the conjugates of ``self``.
EXAMPLES::
sage: it = Word(range(4)).conjugates_iterator()
sage: for w in it: w
word: 0123
word: 1230
word: 2301
word: 3012
"""
yield self
for i in range(1, self.primitive_length()):
yield self.conjugate(i)
def conjugates(self):
r"""
Return the list of unique conjugates of ``self``.
EXAMPLES::
sage: Word(range(6)).conjugates()
[word: 012345,
word: 123450,
word: 234501,
word: 345012,
word: 450123,
word: 501234]
sage: Word('cbbca').conjugates()
[word: cbbca, word: bbcac, word: bcacb, word: cacbb, word: acbbc]
The result contains each conjugate only once::
sage: Word('abcabc').conjugates()
[word: abcabc, word: bcabca, word: cabcab]
TESTS::
sage: Word().conjugates()
[word: ]
sage: Word('a').conjugates()
[word: a]
"""
return list(self.conjugates_iterator())
def conjugate_position(self, other):
r"""
Return the position where ``self`` is conjugate with ``other``.
Return ``None`` if there is no such position.
EXAMPLES::
sage: Word('12113').conjugate_position(Word('31211'))
1
sage: Word('12131').conjugate_position(Word('12113')) is None
True
sage: Word().conjugate_position(Word('123')) is None
True
TESTS:
We check that :trac:`11128` is fixed::
sage: w = Word([0,0,1,0,2,1])
sage: [w.conjugate(i).conjugate_position(w) for i in range(w.length())]
[0, 1, 2, 3, 4, 5]
"""
if self.length() != other.length():
return None
other_square = other * other
pos = other_square.find(self)
return pos if pos != -1 else None
def is_conjugate_with(self, other):
r"""
        Return ``True`` if ``self`` is a conjugate of ``other``, and ``False`` otherwise.
INPUT:
- ``other`` -- a finite word
OUTPUT:
bool
EXAMPLES::
sage: w = Word([0..20])
sage: z = Word([7..20] + [0..6])
sage: w
word: 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20
sage: z
word: 7,8,9,10,11,12,13,14,15,16,17,18,19,20,0,1,2,3,4,5,6
sage: w.is_conjugate_with(z)
True
sage: z.is_conjugate_with(w)
True
sage: u = Word([4]*21)
sage: u.is_conjugate_with(w)
False
sage: u.is_conjugate_with(z)
False
Both words must be finite::
sage: w = Word(iter([2]*100),length='unknown')
sage: z = Word([2]*100)
sage: z.is_conjugate_with(w) #TODO: Not implemented for word of unknown length
True
sage: wf = Word(iter([2]*100),length='finite')
sage: z.is_conjugate_with(wf)
True
sage: wf.is_conjugate_with(z)
True
TESTS::
sage: Word('11213').is_conjugate_with(Word('31121'))
True
sage: Word().is_conjugate_with(Word('123'))
False
sage: Word('112131').is_conjugate_with(Word('11213'))
False
sage: Word('12131').is_conjugate_with(Word('11213'))
True
We make sure that :trac:`11128` is fixed::
sage: Word('abaa').is_conjugate_with(Word('aaba'))
True
sage: Word('aaba').is_conjugate_with(Word('abaa'))
True
"""
return self.length() == other.length() and self.is_factor(other * other)
def is_cadence(self, seq):
r"""
Return ``True`` if ``seq`` is a cadence of ``self``, and ``False`` otherwise.
A *cadence* is an increasing sequence of indexes that all map to
the same letter.
EXAMPLES::
sage: Word('121132123').is_cadence([0, 2, 6])
True
sage: Word('121132123').is_cadence([0, 1, 2])
False
sage: Word('121132123').is_cadence([])
True
"""
if len(seq) == 0:
return True
try:
it = iter(self)
s = next(islice(it, seq[0], None))
for i in range(1, len(seq)):
steps = seq[i] - seq[i-1]
for n in range(steps-1): next(it)
if next(it) != s:
return False
except StopIteration:
return False
return True
def longest_forward_extension(self, x, y):
r"""
Compute the length of the longest factor of ``self`` that
starts at ``x`` and that matches a factor that starts at ``y``.
INPUT:
- ``x``, ``y`` -- positions in ``self``
EXAMPLES::
sage: w = Word('0011001')
sage: w.longest_forward_extension(0, 4)
3
sage: w.longest_forward_extension(0, 2)
0
The method also accepts negative positions indicating the distance from
        the end of the word (in order to be consistent with how negative indices
work with lists). For instance, for a word of length `7`, using
positions `-3` and `2` is the same as using positions `4` and `2`::
sage: w.longest_forward_extension(1, -2)
2
sage: w.longest_forward_extension(4, -3)
3
TESTS::
sage: w = Word('0011001')
sage: w.longest_forward_extension(-10, 2)
Traceback (most recent call last):
...
ValueError: x and y must be valid positions in self
"""
length = self.length()
if not (-length <= x < length and -length <= y < length):
raise ValueError("x and y must be valid positions in self")
if x < 0:
x = x + length
if y < 0:
y = y + length
l = 0
while x < length and y < length and self[x] == self[y]:
l += 1
x += 1
y += 1
return l
def longest_backward_extension(self, x, y):
r"""
Compute the length of the longest factor of ``self`` that
ends at ``x`` and that matches a factor that ends at ``y``.
INPUT:
- ``x``, ``y`` -- positions in ``self``
EXAMPLES::
sage: w = Word('0011001')
sage: w.longest_backward_extension(6, 2)
3
sage: w.longest_backward_extension(1, 4)
1
sage: w.longest_backward_extension(1, 3)
0
The method also accepts negative positions indicating the distance from
the end of the word (in order to be consistent with how negative indices
work with lists). For instance, for a word of length `7`, using
positions `6` and `-5` is the same as using positions `6` and `2`::
sage: w.longest_backward_extension(6, -5)
3
sage: w.longest_backward_extension(-6, 4)
1
TESTS::
sage: w = Word('0011001')
sage: w.longest_backward_extension(4, 23)
Traceback (most recent call last):
...
ValueError: x and y must be valid positions in self
sage: w.longest_backward_extension(-9, 4)
Traceback (most recent call last):
...
ValueError: x and y must be valid positions in self
"""
length = self.length()
if not (-length <= x < length and -length <= y < length):
raise ValueError("x and y must be valid positions in self")
if x < 0:
x = x + length
if y < 0:
y = y + length
l = 0
while x >= 0 and y >= 0 and self[x] == self[y]:
l += 1
x -= 1
y -= 1
return l
def longest_common_suffix(self, other):
r"""
Return the longest common suffix of ``self`` and ``other``.
EXAMPLES::
sage: w = Word('112345678')
sage: u = Word('1115678')
sage: w.longest_common_suffix(u)
word: 5678
sage: u.longest_common_suffix(u)
word: 1115678
sage: u.longest_common_suffix(w)
word: 5678
sage: w.longest_common_suffix(w)
word: 112345678
sage: y = Word('549332345')
sage: w.longest_common_suffix(y)
word:
TESTS:
With the empty word::
sage: w.longest_common_suffix(Word())
word:
sage: Word().longest_common_suffix(w)
word:
sage: Word().longest_common_suffix(Word())
word:
With an infinite word::
sage: t=words.ThueMorseWord('ab')
sage: w.longest_common_suffix(t)
Traceback (most recent call last):
...
TypeError: other must be a finite word
"""
if not isinstance(other, FiniteWord_class):
raise TypeError("other must be a finite word")
if self.is_empty():
return self
if other.is_empty():
return other
iter = enumerate(zip(reversed(self), reversed(other)))
i,(b,c) = next(iter)
if b != c:
#In this case, return the empty word
return self[:0]
for i,(b,c) in iter:
if b != c:
return self[-i:]
else:
return self[-i-1:]
def is_palindrome(self, f=None):
r"""
Return ``True`` if ``self`` is a palindrome (or a ``f``-palindrome), and
``False`` otherwise.
Let `f : \Sigma \rightarrow \Sigma` be an involution that extends
to a morphism on `\Sigma^*`. We say that `w\in\Sigma^*` is a
*`f`-palindrome* if `w=f(\tilde{w})` [1]. Also called
*`f`-pseudo-palindrome* [2].
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``.
It must be callable on letters as well as words (e.g.
:class:`~sage.combinat.words.morphism.WordMorphism`). The
default value corresponds to usual palindromes, i.e., ``f``
equal to the identity.
EXAMPLES::
sage: Word('esope reste ici et se repose').is_palindrome()
False
sage: Word('esoperesteicietserepose').is_palindrome()
True
sage: Word('I saw I was I').is_palindrome()
True
sage: Word('abbcbba').is_palindrome()
True
sage: Word('abcbdba').is_palindrome()
False
Some `f`-palindromes::
sage: f = WordMorphism('a->b,b->a')
sage: Word('aababb').is_palindrome(f)
True
::
sage: f = WordMorphism('a->b,b->a,c->c')
sage: Word('abacbacbab').is_palindrome(f)
True
::
sage: f = WordMorphism({'a':'b','b':'a'})
sage: Word('aababb').is_palindrome(f)
True
::
sage: f = WordMorphism({0:[1],1:[0]})
sage: w = words.ThueMorseWord()[:8]; w
word: 01101001
sage: w.is_palindrome(f)
True
The word must be in the domain of the involution::
sage: f = WordMorphism('a->a')
sage: Word('aababb').is_palindrome(f)
Traceback (most recent call last):
...
KeyError: 'b'
TESTS:
If the given involution is not an involution::
sage: f = WordMorphism('a->b,b->b')
sage: Word('abab').is_palindrome(f)
Traceback (most recent call last):
...
TypeError: self (=a->b, b->b) is not an endomorphism
::
sage: Y = Word
sage: Y().is_palindrome()
True
sage: Y('a').is_palindrome()
True
sage: Y('ab').is_palindrome()
False
sage: Y('aba').is_palindrome()
True
sage: Y('aa').is_palindrome()
True
sage: E = WordMorphism('a->b,b->a')
sage: Y().is_palindrome(E)
True
sage: Y('a').is_palindrome(E)
False
sage: Y('ab').is_palindrome(E)
True
sage: Y('aa').is_palindrome(E)
False
sage: Y('aba').is_palindrome(E)
False
sage: Y('abab').is_palindrome(E)
True
REFERENCES:
- [1] S. Labbé, Propriétés combinatoires des `f`-palindromes,
Mémoire de maîtrise en Mathématiques, Montréal, UQAM, 2008,
109 pages.
- [2] V. Anne, L.Q. Zamboni, I. Zorca, Palindromes and Pseudo-
Palindromes in Episturmian and Pseudo-Palindromic Infinite Words,
in : S. Brlek, C. Reutenauer (Eds.), Words 2005, Publications du
LaCIM, Vol. 36 (2005) 91--100.
"""
l = self.length()
if f is None:
return self[:l//2] == self[l//2 + l%2:].reversal()
else:
from sage.combinat.words.morphism import WordMorphism
if not isinstance(f, WordMorphism):
f = WordMorphism(f)
if not f.is_involution():
raise ValueError("f must be an involution")
return self[:l//2 + l%2] == f(self[l//2:].reversal())
def lps(self, f=None, l=None):
r"""
Return the longest palindromic (or ``f``-palindromic) suffix of ``self``.
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``.
It must be callable on letters as well as words (e.g. ``WordMorphism``).
- ``l`` -- integer (default: ``None``) the length of the longest
palindrome suffix of ``self[:-1]``, if known.
OUTPUT:
word -- If ``f`` is ``None``, the longest palindromic suffix of ``self``;
otherwise, the longest ``f``-palindromic suffix of ``self``.
EXAMPLES::
sage: Word('0111').lps()
word: 111
sage: Word('011101').lps()
word: 101
sage: Word('6667').lps()
word: 7
sage: Word('abbabaab').lps()
word: baab
sage: Word().lps()
word:
sage: f = WordMorphism('a->b,b->a')
sage: Word('abbabaab').lps(f=f)
word: abbabaab
sage: w = Word('33412321')
sage: w.lps(l=3)
word: 12321
sage: Y = Word
sage: w = Y('01101001')
sage: w.lps(l=2)
word: 1001
sage: w.lps()
word: 1001
sage: w.lps(l=None)
word: 1001
sage: Y().lps(l=2)
Traceback (most recent call last):
...
IndexError: list index out of range
sage: v = Word('abbabaab')
sage: pal = v[:0]
sage: for i in range(1, v.length()+1):
....: pal = v[:i].lps(l=pal.length())
....: pal
word: a
word: b
word: bb
word: abba
word: bab
word: aba
word: aa
word: baab
sage: f = WordMorphism('a->b,b->a')
sage: v = Word('abbabaab')
sage: pal = v[:0]
sage: for i in range(1, v.length()+1):
....: pal = v[:i].lps(f=f, l=pal.length())
....: pal
word:
word: ab
word:
word: ba
word: ab
word: baba
word: bbabaa
word: abbabaab
"""
#If the length of the lps of self[:-1] is not known:
if l is None:
l = self.lps_lengths(f)[-1]
return self[len(self)-l:]
#If l == w[:-1].length(), there is no shortcut
if self.length() == l + 1:
return self.lps(f=f)
#Obtain the letter to the left (g) and to the right (d) of the
#preceding lps of self
g = self[-l-2]
d = self[-1]
#If the word g*d is an `f`-palindrome, the result follows
if f is None:
if g == d:
return self[-l-2:]
else:
#Otherwise, the length of the lps of self is smaller than l+2
return self[-l-1:].lps()
else:
from sage.combinat.words.morphism import WordMorphism
f = WordMorphism(f)
if f(g)[0] == d:
return self[-l-2:]
else:
return self[-l-1:].lps(f=f)
@cached_method
def palindromic_lacunas_study(self, f=None):
r"""
Return interesting statistics about longest (``f``-)palindromic suffixes
and lacunas of ``self`` (see [1] and [2]).
Note that a word `w` has at most `|w| + 1` different palindromic factors
(see [3]). For `f`-palindromes (or pseudopalindromes or theta-palindromes),
the maximum number of `f`-palindromic factors is `|w|+1-g_f(w)`, where
`g_f(w)` is the number of pairs `\{a, f(a)\}` such that `a` is a letter,
`a` is not equal to `f(a)`, and `a` or `f(a)` occurs in `w`, see [4].
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``.
It must be callable on letters as well as words (e.g. ``WordMorphism``).
The default value corresponds to usual palindromes, i.e.,
``f`` equal to the identity.
OUTPUT:
- ``list`` -- list of the length of the longest palindromic
suffix (lps) for each non-empty prefix of ``self``
- ``list`` -- list of all the lacunas, i.e. positions where there is no
unioccurrent lps
- ``set`` -- set of palindromic factors of ``self``
EXAMPLES::
sage: a,b,c = Word('abbabaabbaab').palindromic_lacunas_study()
sage: a
[1, 1, 2, 4, 3, 3, 2, 4, 2, 4, 6, 8]
sage: b
[8, 9]
sage: c # random order
set([word: , word: b, word: bab, word: abba, word: bb, word: aa, word: baabbaab, word: baab, word: aba, word: aabbaa, word: a])
::
sage: f = WordMorphism('a->b,b->a')
sage: a,b,c = Word('abbabaab').palindromic_lacunas_study(f=f)
sage: a
[0, 2, 0, 2, 2, 4, 6, 8]
sage: b
[0, 2, 4]
sage: c # random order
set([word: , word: ba, word: baba, word: ab, word: bbabaa, word: abbabaab])
sage: c == set([Word(), Word('ba'), Word('baba'), Word('ab'), Word('bbabaa'), Word('abbabaab')])
True
REFERENCES:
- [1] A. Blondin-Massé, S. Brlek, S. Labbé, Palindromic lacunas
of the Thue-Morse word, Proc. GASCOM 2008 (June 16-20 2008,
Bibbiena, Arezzo-Italia), 53--67.
- [2] A. Blondin-Massé, S. Brlek, A. Frosini, S. Labbé, S. Rinaldi,
Reconstructing words from a fixed palindromic length sequence,
Proc. TCS 2008, 5th IFIP International Conference on Theoretical
Computer Science (September 8-10 2008, Milano, Italia), accepted.
- [3] X. Droubay, J. Justin, G. Pirillo, Episturmian words and
some constructions of de Luca and Rauzy, Theoret. Comput. Sci.
255 (2001) 539--553.
- [4] Š. Starosta, On Theta-palindromic Richness, Theoret. Comp.
Sci. 412 (2011) 1111--1121
"""
#Initialize the results of computations
palindromes = set()
lengths_lps = [None] * self.length()
lacunas = []
#Initialize the first lps
pal = self[:0]
palindromes.add(pal)
#For all the non-empty prefixes of self,
for i in range(self.length()):
#Compute its longest `f`-palindromic suffix using the preceding lps (pal)
pal = self[:i+1].lps(l=pal.length(),f=f)
lengths_lps[i] = pal.length()
if pal in palindromes:
lacunas.append(i)
else :
palindromes.add(pal)
return lengths_lps, lacunas, palindromes
def lengths_lps(self, f=None):
r"""
Return the list of the length of the longest palindromic
suffix (lps) for each non-empty prefix of ``self``.
It corresponds to the function `G_w` defined in [1].
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
a list -- list of the length of the longest palindromic
suffix (lps) for each non-empty prefix of ``self``
EXAMPLES::
sage: Word().lengths_lps()
[]
sage: Word('a').lengths_lps()
[1]
sage: Word('aaa').lengths_lps()
[1, 2, 3]
sage: Word('abbabaabbaab').lengths_lps()
[1, 1, 2, 4, 3, 3, 2, 4, 2, 4, 6, 8]
::
sage: f = WordMorphism('a->b,b->a')
sage: Word('abbabaabbaab').lengths_lps(f)
[0, 2, 0, 2, 2, 4, 6, 8, 4, 6, 4, 6]
::
sage: f = WordMorphism({5:[8],8:[5]})
sage: Word([5,8,5,5,8,8,5,5,8,8,5,8,5]).lengths_lps(f)
[0, 2, 2, 0, 2, 4, 6, 4, 6, 8, 10, 12, 4]
REFERENCES:
- [1] A. Blondin-Massé, S. Brlek, A. Frosini, S. Labbé,
S. Rinaldi, Reconstructing words from a fixed palindromic length
sequence, Proc. TCS 2008, 5th IFIP International Conference on
Theoretical Computer Science (September 8-10 2008, Milano,
Italia), accepted.
"""
return self.palindromic_lacunas_study(f=f)[0]
def lacunas(self, f=None):
r"""
Return the list of all the lacunas of ``self``.
A *lacuna* is a position in a word where the longest (`f`-)palindromic
suffix is not unioccurrent (see [1]).
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``). The
default value corresponds to usual palindromes, i.e., ``f`` equal to
the identity.
OUTPUT:
a list -- list of all the lacunas of self
EXAMPLES::
sage: w = Word([0,1,1,2,3,4,5,1,13,3])
sage: w.lacunas()
[7, 9]
sage: words.ThueMorseWord()[:100].lacunas()
[8, 9, 24, 25, 32, 33, 34, 35, 36, 37, 38, 39, 96, 97, 98, 99]
sage: f = WordMorphism({0:[1],1:[0]})
sage: words.ThueMorseWord()[:50].lacunas(f)
[0, 2, 4, 12, 16, 17, 18, 19, 48, 49]
REFERENCES:
- [1] A. Blondin-Massé, S. Brlek, S. Labbé, Palindromic lacunas
of the Thue-Morse word, Proc. GASCOM 2008 (June 16-20 2008,
Bibbiena, Arezzo-Italia), 53--67.
"""
return self.palindromic_lacunas_study(f=f)[1]
def lengths_unioccurrent_lps(self, f=None):
r"""
Return the list of the lengths of the unioccurrent longest
(``f``-)palindromic suffixes (lps) for each non-empty prefix of ``self``.
Positions with no unioccurrent lps are indicated by ``None``.
It corresponds to the function `H_w` defined in [1] and [2].
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``). The
default value corresponds to usual palindromes, i.e., ``f`` equal to
the identity.
OUTPUT:
a list -- list of the length of the unioccurrent longest palindromic
suffix (lps) for each non-empty prefix of ``self``.
Positions with no unioccurrent lps are indicated by ``None``.
EXAMPLES::
sage: w = Word([0,1,1,2,3,4,5,1,13,3])
sage: w.lengths_unioccurrent_lps()
[1, 1, 2, 1, 1, 1, 1, None, 1, None]
sage: f = words.FibonacciWord()[:20]
sage: f.lengths_unioccurrent_lps() == f.lengths_lps()
True
sage: t = words.ThueMorseWord()
sage: t[:20].lengths_unioccurrent_lps()
[1, 1, 2, 4, 3, 3, 2, 4, None, None, 6, 8, 10, 12, 14, 16, 6, 8, 10, 12]
sage: f = WordMorphism({1:[0],0:[1]})
sage: t[:15].lengths_unioccurrent_lps(f)
[None, 2, None, 2, None, 4, 6, 8, 4, 6, 4, 6, None, 4, 6]
REFERENCES:
- [1] A. Blondin-Massé, S. Brlek, S. Labbé, Palindromic lacunas of
the Thue-Morse word, Proc. GASCOM 2008 (June 16-20 2008, Bibbiena,
Arezzo-Italia), 53--67.
- [2] A. Blondin-Massé, S. Brlek, A. Frosini, S. Labbé, S. Rinaldi,
Reconstructing words from a fixed palindromic length sequence,
Proc. TCS 2008, 5th IFIP International Conference on Theoretical
Computer Science (September 8-10 2008, Milano, Italia), accepted.
"""
l = self.lengths_lps(f=f)
for i in self.lacunas(f=f):
l[i] = None
return l
def length_maximal_palindrome(self, j, m=None, f=None):
r"""
Return the length of the longest palindrome centered at position ``j``.
INPUT:
- ``j`` -- rational, position of the symmetry axis of the palindrome.
Must return an integer when doubled. It is an integer when the
center of the palindrome is a letter.
- ``m`` -- integer (default: ``None``), minimal length of palindrome, if known.
The parity of ``m`` can't be the same as the parity of ``2j``.
- ``f`` -- involution (default: ``None``), on the alphabet. It must be
callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
length of the longest ``f``-palindrome centered at position ``j``
EXAMPLES::
sage: Word('01001010').length_maximal_palindrome(3/2)
0
sage: Word('01101001').length_maximal_palindrome(3/2)
4
sage: Word('01010').length_maximal_palindrome(j=3, f='0->1,1->0')
0
sage: Word('01010').length_maximal_palindrome(j=2.5, f='0->1,1->0')
4
sage: Word('0222220').length_maximal_palindrome(3, f='0->1,1->0,2->2')
5
::
sage: w = Word('abcdcbaxyzzyx')
sage: w.length_maximal_palindrome(3)
7
sage: w.length_maximal_palindrome(3, 3)
7
sage: w.length_maximal_palindrome(3.5)
0
sage: w.length_maximal_palindrome(9.5)
6
sage: w.length_maximal_palindrome(9.5, 2)
6
TESTS:
These are wrong inputs::
sage: w.length_maximal_palindrome(9.6)
Traceback (most recent call last):
...
ValueError: j must be positive, inferior to length of self
sage: w.length_maximal_palindrome(3, 2)
Traceback (most recent call last):
...
ValueError: (2*j-m-1)/2(=3/2) must be an integer, i.e., 2*j(=6) and
m(=2) can't have the same parity
sage: w.length_maximal_palindrome(9.5, 3)
Traceback (most recent call last):
...
ValueError: (2*j-m-1)/2(=15/2) must be an integer, i.e., 2*j(=19) and
m(=3) can't have the same parity
"""
# Ensure `f` is an involutory word morphism
if f is not None:
from sage.combinat.words.morphism import WordMorphism
if not isinstance(f, WordMorphism):
f = WordMorphism(f)
if not f.is_involution():
raise ValueError("f must be an involution")
# Ensure j is a valid entry
jj = 2*j
if not jj.is_integer() or j < 0 or j >= len(self):
raise ValueError("j must be positive, inferior to length of self")
jj = Integer(jj)
# Initialize length of the known palindrome
if m is None:
m = 0 if jj % 2 == 1 else -1
# Initialize the next (left) position to check
i = (jj - m - 1) / 2
if not i.is_integer():
raise ValueError("(2*j-m-1)/2(={}) must be an integer, i.e., "
"2*j(={}) and m(={}) can't "
"have the same parity".format(i, jj, m))
i = Integer(i)
# Compute
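# i is the leftmost position not yet known to belong to the palindrome
# centered at axis jj/2 (given the known minimal length m); it is decreased
# while self[i] matches its mirror image self[jj-i]. The matched positions
# are i+1, ..., jj-i-1, so the maximal palindrome has length jj - 2*i - 1
# (or 0 when even the center fails to match).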
if f is None:
while i >= 0 and jj-i < len(self) and self[i] == self[jj-i]:
i -= 1
else:
while i >= 0 and jj-i < len(self) and self[i] == f(self[jj-i])[0]:
i -= 1
if jj == 2 * i:
return 0
else:
return jj - 2*i - 1
def lengths_maximal_palindromes(self, f=None):
r"""
Return the length of maximal palindromes centered at each position.
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
a list -- The length of the maximal palindrome (or ``f``-palindrome)
with a given symmetry axis (letter or space between two letters).
EXAMPLES::
sage: Word('01101001').lengths_maximal_palindromes()
[0, 1, 0, 1, 4, 1, 0, 3, 0, 3, 0, 1, 4, 1, 0, 1, 0]
sage: Word('00000').lengths_maximal_palindromes()
[0, 1, 2, 3, 4, 5, 4, 3, 2, 1, 0]
sage: Word('0').lengths_maximal_palindromes()
[0, 1, 0]
sage: Word('').lengths_maximal_palindromes()
[0]
sage: Word().lengths_maximal_palindromes()
[0]
sage: f = WordMorphism('a->b,b->a')
sage: Word('abbabaab').lengths_maximal_palindromes(f)
[0, 0, 2, 0, 0, 0, 2, 0, 8, 0, 2, 0, 0, 0, 2, 0, 0]
"""
if f is not None :
from sage.combinat.words.morphism import WordMorphism
if not isinstance(f, WordMorphism):
f = WordMorphism(f)
if not f.is_involution():
raise ValueError("f must be an involution")
LPC = [] # lengths of the maximal palindromes centered at a position
LPC.append(0)
k = 0 # index, center of rightmost-ending `f`-palindrome encountered
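# Manacher-like sweep over the 2*len(self)+1 symmetry axes (letters and gaps
# between letters), adapted to `f`-palindromes. k is the axis of the
# palindrome reaching furthest to the right among those already computed.
# An axis j lying strictly inside that palindrome first reuses, by symmetry,
# the length computed at the mirror axis 2*k - j; an explicit extension via
# length_maximal_palindrome() is only needed when the mirrored value is not
# conclusive, or when j lies at or beyond the right end of that palindrome.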
for j in range(1, 2 * len(self) + 1):
if j >= k + LPC[k]:
p = self.length_maximal_palindrome((j - 1)*0.5, -(j%2), f)
LPC.append(p)
if j + p > k + LPC[k]:
k = j
# If the center is included in an encountered `f`-palindrome
else:
# If the `f`-palindrome centered at position j is not the
# longest proper `f`-palindromic suffix of the maximal
# `f`-palindrome centered at k
if LPC[k] + k - j != LPC[2*k - j]:
LPC.append(min(LPC[k] + k - j, LPC[2*k - j]))
else:
mp = LPC[k] + k - j
p = self.length_maximal_palindrome((j-1)*0.5, mp, f)
LPC.append(p)
k = j
return LPC
def lps_lengths(self, f=None):
r"""
Return the length of the longest palindromic suffix of each prefix.
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
a list -- The length of the longest palindromic (or ``f``-palindromic)
suffix of each prefix of ``self``.
EXAMPLES::
sage: Word('01101001').lps_lengths()
[0, 1, 1, 2, 4, 3, 3, 2, 4]
sage: Word('00000').lps_lengths()
[0, 1, 2, 3, 4, 5]
sage: Word('0').lps_lengths()
[0, 1]
sage: Word('').lps_lengths()
[0]
sage: Word().lps_lengths()
[0]
sage: f = WordMorphism('a->b,b->a')
sage: Word('abbabaab').lps_lengths(f)
[0, 0, 2, 0, 2, 2, 4, 6, 8]
"""
LPC = self.lengths_maximal_palindromes(f)
LPS = [] # lengths of the longest palindromic suffix of prefixes
k = 0
LPS.append(0)
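# Sweep the symmetry axes from left to right. Whenever the palindrome
# centered at axis j reaches strictly further right than all previous ones,
# each newly covered even axis index i marks the end of a prefix whose
# longest palindromic suffix is the palindrome centered at j, of length i - j.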
for j in range(1, 2*len(self)+1):
if j + LPC[j] > k + LPC[k]:
for i in range(k + LPC[k] + 1, j + LPC[j] + 1):
if i % 2 == 0:
LPS.append(i-j)
k = j
return LPS
def palindromes(self, f=None):
r"""
Return the set of all palindromic (or ``f``-palindromic) factors of ``self``.
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
a set -- If ``f`` is ``None``, the set of all palindromic factors of ``self``;
otherwise, the set of all ``f``-palindromic factors of ``self``.
EXAMPLES::
sage: sorted(Word('01101001').palindromes())
[word: , word: 0, word: 00, word: 010, word: 0110, word: 1, word: 1001, word: 101, word: 11]
sage: sorted(Word('00000').palindromes())
[word: , word: 0, word: 00, word: 000, word: 0000, word: 00000]
sage: sorted(Word('0').palindromes())
[word: , word: 0]
sage: sorted(Word('').palindromes())
[word: ]
sage: sorted(Word().palindromes())
[word: ]
sage: f = WordMorphism('a->b,b->a')
sage: sorted(Word('abbabaab').palindromes(f))
[word: , word: ab, word: abbabaab, word: ba, word: baba, word: bbabaa]
"""
LPS = self.lps_lengths(f)
return set(self[i-LPS[i] : i] for i in range(len(self)+1))
def palindrome_prefixes(self):
r"""
Return a list of all palindrome prefixes of ``self``.
OUTPUT:
a list -- A list of all palindrome prefixes of ``self``.
EXAMPLES::
sage: w = Word('abaaba')
sage: w.palindrome_prefixes()
[word: , word: a, word: aba, word: abaaba]
sage: w = Word('abbbbbbbbbb')
sage: w.palindrome_prefixes()
[word: , word: a]
"""
return list(self.palindrome_prefixes_iterator())
def defect(self, f=None):
r"""
Return the defect of ``self``.
The *defect* of a finite word `w` is given by the difference between
the maximum number of possible palindromic factors in a word of length
`|w|` and the actual number of palindromic factors contained in `w`.
It is well known that the maximum number of palindromic factors in `w`
is `|w|+1` (see [DJP01]_).
An optional involution on letters ``f`` can be given. In that case, the
*f-palindromic defect* (or *pseudopalindromic defect*, or
*theta-palindromic defect*) of `w` is returned. It is a
generalization of defect to f-palindromes. More precisely, the defect is
`D(w)=|w|+1-g_f(w)-|PAL_f(w)|`, where `PAL_f(w)` denotes the set of
f-palindromic factors of `w` (including the empty word) and `g_f(w)` is
the number of pairs `\{a, f(a)\}` such that `a` is a letter, `a` is not
equal to `f(a)`, and `a` or `f(a)` occurs in `w`. In the case of usual
palindromes (i.e., for ``f`` not given or equal to the identity),
`g_f(w) = 0` for all `w`. See [BHNR04]_ for usual palindromes and [Sta11]_
for f-palindromes.
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``). The
default value corresponds to usual palindromes, i.e., ``f`` equal to
the identity.
OUTPUT:
an integer -- If ``f`` is ``None``, the palindromic defect of ``self``;
otherwise, the ``f``-palindromic defect of ``self``.
EXAMPLES::
sage: Word('ara').defect()
0
sage: Word('abcacba').defect()
1
It is known that Sturmian words (see [DJP01]_) have zero defect::
sage: words.FibonacciWord()[:100].defect()
0
sage: sa = WordMorphism('a->ab,b->b')
sage: sb = WordMorphism('a->a,b->ba')
sage: w = (sa*sb*sb*sa*sa*sa*sb).fixed_point('a')
sage: w[:30].defect()
0
sage: w[110:140].defect()
0
It is even conjectured that the defect of an aperiodic word which is
a fixed point of a primitive morphism is either `0` or infinite
(see [BBGL08]_)::
sage: w = words.ThueMorseWord()
sage: w[:50].defect()
12
sage: w[:100].defect()
16
sage: w[:300].defect()
52
For generalized defect with an involution different from the identity,
there is always a letter which is not a palindrome! This is the reason
for the modification of the definition::
sage: f = WordMorphism('a->b,b->a')
sage: Word('a').defect(f)
0
sage: Word('ab').defect(f)
0
sage: Word('aa').defect(f)
1
sage: Word('abbabaabbaababba').defect(f)
3
::
sage: f = WordMorphism('a->b,b->a,c->c')
sage: Word('cabc').defect(f)
0
sage: Word('abcaab').defect(f)
2
Other examples::
sage: Word('000000000000').defect()
0
sage: Word('011010011001').defect()
2
sage: Word('0101001010001').defect()
0
sage: Word().defect()
0
sage: Word('abbabaabbaababba').defect()
2
REFERENCES:
.. [BBGL08] \A. Blondin Massé, S. Brlek, A. Garon, and S. Labbé,
Combinatorial properties of f -palindromes in the Thue-Morse
sequence. Pure Math. Appl., 19(2-3):39--52, 2008.
.. [BHNR04] \S. Brlek, S. Hamel, M. Nivat, C. Reutenauer, On the
Palindromic Complexity of Infinite Words, in J. Berstel, J.
Karhumaki, D. Perrin, Eds, Combinatorics on Words with Applications,
International Journal of Foundation of Computer Science, Vol. 15,
No. 2 (2004) 293--306.
.. [DJP01] \X. Droubay, J. Justin, G. Pirillo, Episturmian words and some
constructions of de Luca and Rauzy, Theoret. Comput. Sci. 255,
(2001), no. 1--2, 539--553.
.. [Sta11] \Š. Starosta, On Theta-palindromic Richness, Theoret. Comp.
Sci. 412 (2011) 1111--1121
"""
g_w = 0
if f is not None:
from sage.combinat.words.morphism import WordMorphism
if not isinstance(f, WordMorphism):
f = WordMorphism(f)
if not f.is_involution():
raise ValueError("f must be an involution")
D = f.domain()
A = set(map(D, self.letters()))
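# g_w counts the pairs {x, f(x)} with x != f(x) such that x or f(x) occurs
# in self; each such pair lowers the maximal possible number of
# f-palindromic factors by one (see the formula in the docstring).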
while A:
x = A.pop()
if f(x) != x: # count only non f-palindromic letters
if f(x) in A:
A.remove(f(x))
g_w +=1
return self.length()+1-g_w-len(self.palindromes(f=f))
def is_full(self, f=None):
r"""
Return ``True`` if ``self`` has defect `0`, and ``False`` otherwise.
A word is *full* (or *rich*) if its defect is zero (see [1]).
If ``f`` is given, then the ``f``-palindromic defect is used (see [2]).
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
boolean -- If ``f`` is ``None``, whether ``self`` is full;
otherwise, whether ``self`` is full of ``f``-palindromes.
EXAMPLES::
sage: words.ThueMorseWord()[:100].is_full()
False
sage: words.FibonacciWord()[:100].is_full()
True
sage: Word('000000000000000').is_full()
True
sage: Word('011010011001').is_full()
False
sage: Word('2194').is_full()
True
sage: Word().is_full()
True
::
sage: f = WordMorphism('a->b,b->a')
sage: Word().is_full(f)
True
sage: w = Word('ab')
sage: w.is_full()
True
sage: w.is_full(f)
True
::
sage: f = WordMorphism('a->b,b->a')
sage: Word('abab').is_full(f)
True
sage: Word('abba').is_full(f)
False
A simple example of an infinite word full of f-palindromes::
sage: p = WordMorphism({0:'abc',1:'ab'})
sage: f = WordMorphism('a->b,b->a,c->c')
sage: p(words.FibonacciWord()[:50]).is_full(f)
True
sage: p(words.FibonacciWord()[:150]).is_full(f)
True
REFERENCES:
- [1] S. Brlek, S. Hamel, M. Nivat, C. Reutenauer, On the Palindromic
Complexity of Infinite Words, in J. Berstel, J. Karhumaki,
D. Perrin, Eds, Combinatorics on Words with Applications,
International Journal of Foundation of Computer Science, Vol. 15,
No. 2 (2004) 293--306.
- [2] E. Pelantová, Š. Starosta, Infinite words rich and almost rich
in generalized palindromes, in: G. Mauri, A. Leporati (Eds.),
Developments in Language Theory, volume 6795 of Lecture Notes
in Computer Science, Springer-Verlag, Berlin, Heidelberg, 2011,
pp. 406--416
"""
return self.defect(f=f) == 0
is_rich = is_full
def palindromic_closure(self, side='right', f=None):
r"""
Return the shortest palindrome having ``self`` as a prefix
(or as a suffix if ``side`` is ``'left'``).
See [1].
INPUT:
- ``side`` -- ``'right'`` or ``'left'`` (default: ``'right'``) the
direction of the closure
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``.
It must be callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
a word -- If ``f`` is ``None``, the right palindromic closure of ``self``;
otherwise, the right ``f``-palindromic closure of ``self``.
If ``side`` is ``'left'``, the left palindromic closure.
EXAMPLES::
sage: Word('1233').palindromic_closure()
word: 123321
sage: Word('12332').palindromic_closure()
word: 123321
sage: Word('0110343').palindromic_closure()
word: 01103430110
sage: Word('0110343').palindromic_closure(side='left')
word: 3430110343
sage: Word('01105678').palindromic_closure(side='left')
word: 876501105678
sage: w = Word('abbaba')
sage: w.palindromic_closure()
word: abbababba
::
sage: f = WordMorphism('a->b,b->a')
sage: w.palindromic_closure(f=f)
word: abbabaab
sage: w.palindromic_closure(f=f, side='left')
word: babaabbaba
TESTS::
sage: f = WordMorphism('a->c,c->a')
sage: w.palindromic_closure(f=f, side='left')
Traceback (most recent call last):
...
KeyError: 'b'
REFERENCES:
- [1] A. de Luca, A. De Luca, Pseudopalindrome closure operators
in free monoids, Theoret. Comput. Sci. 362 (2006) 282--300.
"""
if f is None:
if side == 'right':
l = self.lps().length()
#return self * self[-(l+1)::-1]
return self * self[:self.length()-l].reversal()
elif side == 'left':
l = self.reversal().lps().length()
return self[:l-1:-1] * self
else:
raise ValueError("side must be either 'left' or 'right' (not %s) " % side)
else:
from sage.combinat.words.morphism import WordMorphism
f = WordMorphism(f)
if not f.is_involution():
raise ValueError("f must be an involution")
if side == 'right':
l = self.lps(f=f).length()
return self * f(self[-(l+1)::-1])
elif side == 'left':
l = self.reversal().lps(f=f).length()
return f(self[:l-1:-1]) * self
else:
raise ValueError("side must be either 'left' or 'right' (not %s) " % side)
def is_symmetric(self, f=None):
r"""
Return ``True`` if ``self`` is symmetric (or ``f``-symmetric), and
``False`` otherwise.
A word is *symmetric* (resp. `f`-*symmetric*) if it is the
product of two palindromes (resp. `f`-palindromes). See [1] and [2].
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``. It must
be callable on letters as well as words (e.g. ``WordMorphism``).
EXAMPLES::
sage: Word('abbabab').is_symmetric()
True
sage: Word('ababa').is_symmetric()
True
sage: Word('aababaabba').is_symmetric()
False
sage: Word('aabbbaababba').is_symmetric()
False
sage: f = WordMorphism('a->b,b->a')
sage: Word('aabbbaababba').is_symmetric(f)
True
REFERENCES:
- [1] S. Brlek, S. Hamel, M. Nivat, C. Reutenauer, On the Palindromic
Complexity of Infinite Words, in J. Berstel, J. Karhumaki,
D. Perrin, Eds, Combinatorics on Words with Applications,
International Journal of Foundation of Computer Science, Vol. 15,
No. 2 (2004) 293--306.
- [2] A. de Luca, A. De Luca, Pseudopalindrome closure operators
in free monoids, Theoret. Comput. Sci. 362 (2006) 282--300.
"""
square = self*self
return square.lps_lengths(f)[-1] >= self.length()
def length_border(self):
r"""
Return the length of the border of ``self``.
The *border* of a word is the longest word that is both a proper
prefix and a proper suffix of ``self``.
EXAMPLES::
sage: Word('121').length_border()
1
sage: Word('1').length_border()
0
sage: Word('1212').length_border()
2
sage: Word('111').length_border()
2
sage: Word().length_border() is None
True
"""
if self.is_empty():
return None
return self.prefix_function_table()[-1]
def border(self):
r"""
Return the longest word that is both a proper prefix and a proper
suffix of ``self``.
EXAMPLES::
sage: Word('121212').border()
word: 1212
sage: Word('12321').border()
word: 1
sage: Word().border() is None
True
"""
if self.is_empty():
return None
return self[:self.length_border()]
def minimal_period(self):
r"""
Return the minimal period of ``self``.
Let `A` be an alphabet. An integer `p\geq 1` is a *period* of a
word `w=a_1a_2\cdots a_n` where `a_i\in A` if `a_i=a_{i+p}` for
`i=1,\ldots,n-p`. The smallest period of `w` is called *the*
period of `w`. See Chapter 1 of [1].
EXAMPLES::
sage: Word('aba').minimal_period()
2
sage: Word('abab').minimal_period()
2
sage: Word('ababa').minimal_period()
2
sage: Word('ababaa').minimal_period()
5
sage: Word('ababac').minimal_period()
6
sage: Word('aaaaaa').minimal_period()
1
sage: Word('a').minimal_period()
1
sage: Word().minimal_period()
1
REFERENCES:
- [1] M. Lothaire, Algebraic Combinatorics On Words, vol. 90 of
Encyclopedia of Mathematics and its Applications, Cambridge
University Press, U.K., 2002.
"""
if self.is_empty():
return 1
return self.length()-self.length_border()
def order(self):
r"""
Return the order of ``self``.
Let `p(w)` be the period of a word `w`. The positive rational number
`|w|/p(w)` is the *order* of `w`. See Chapter 8 of [1].
OUTPUT:
rational -- the order
EXAMPLES::
sage: Word('abaaba').order()
2
sage: Word('ababaaba').order()
8/5
sage: Word('a').order()
1
sage: Word('aa').order()
2
sage: Word().order()
0
REFERENCES:
- [1] M. Lothaire, Algebraic Combinatorics On Words, vol. 90 of
Encyclopedia of Mathematics and its Applications, Cambridge
University Press, U.K., 2002.
"""
from sage.rings.rational import Rational
return Rational((self.length(),self.minimal_period()))
def critical_exponent(self):
r"""
Return the critical exponent of ``self``.
The *critical exponent* of a word is the supremum of the order of
all its (finite) factors. See [Dejean]_.
.. note::
The implementation here uses the suffix tree to enumerate all the
factors. It should be improved.
EXAMPLES::
sage: Word('aaba').critical_exponent()
2
sage: Word('aabaa').critical_exponent()
2
sage: Word('aabaaba').critical_exponent()
7/3
sage: Word('ab').critical_exponent()
1
sage: Word('aba').critical_exponent()
3/2
sage: words.ThueMorseWord()[:20].critical_exponent()
2
REFERENCES:
.. [Dejean] \F. Dejean. Sur un théorème de Thue. J. Combinatorial Theory
Ser. A 13:90--99, 1972.
"""
return max(map(FiniteWord_class.order, self.factor_iterator()))
def is_overlap(self):
r"""
Return ``True`` if ``self`` is an overlap, and ``False`` otherwise.
EXAMPLES::
sage: Word('12121').is_overlap()
True
sage: Word('123').is_overlap()
False
sage: Word('1231').is_overlap()
False
sage: Word('123123').is_overlap()
False
sage: Word('1231231').is_overlap()
True
sage: Word().is_overlap()
False
"""
if self.length() == 0:
return False
return self.length_border() > self.length()//2
def primitive_length(self):
r"""
Return the length of the primitive of ``self``.
EXAMPLES::
sage: Word('1231').primitive_length()
4
sage: Word('121212').primitive_length()
2
"""
l = lu = self.length()
if l == 0:
return 0
p = self.prefix_function_table()
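# Walk down the chain of borders of self given by the prefix function: each
# border length l yields a period lu - l of self, in increasing order. The
# first period dividing the length lu is the length of the primitive root.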
while l > 0:
l = p[l-1]
if lu % (lu - l) == 0:
return lu - l
def is_primitive(self):
r"""
Return ``True`` if ``self`` is primitive, and ``False`` otherwise.
A finite word `w` is *primitive* if it is not a positive integer
power of a shorter word.
EXAMPLES::
sage: Word('1231').is_primitive()
True
sage: Word('111').is_primitive()
False
"""
return self.length() == self.primitive_length()
def primitive(self):
r"""
Return the primitive of ``self``.
EXAMPLES::
sage: Word('12312').primitive()
word: 12312
sage: Word('121212').primitive()
word: 12
"""
return self[:self.primitive_length()]
def exponent(self):
r"""
Return the exponent of ``self``.
OUTPUT:
integer -- the exponent
EXAMPLES::
sage: Word('1231').exponent()
1
sage: Word('121212').exponent()
3
sage: Word().exponent()
0
"""
if self.length() == 0:
return 0
return self.length() // self.primitive_length()
def has_period(self, p):
r"""
Return ``True`` if ``self`` has the period ``p``,
``False`` otherwise.
.. NOTE::
By convention, integers greater than the length
of ``self`` are periods of ``self``.
INPUT:
- ``p`` -- an integer to check if it is a period
of ``self``.
EXAMPLES::
sage: w = Word('ababa')
sage: w.has_period(2)
True
sage: w.has_period(3)
False
sage: w.has_period(4)
True
sage: w.has_period(-1)
False
sage: w.has_period(5)
True
sage: w.has_period(6)
True
"""
if p < 0:
return False
elif p >= len(self):
return True
else:
for i in range(len(self) - p):
if self[i] != self[i + p]:
return False
return True
def periods(self, divide_length=False):
r"""
Return a list containing the periods of ``self``
between `1` and `n - 1`, where `n` is the length
of ``self``.
INPUT:
- ``divide_length`` -- boolean (default: ``False``).
When set to ``True``, then only periods that divide
the length of ``self`` are considered.
OUTPUT:
a list of positive integers
EXAMPLES::
sage: w = Word('ababab')
sage: w.periods()
[2, 4]
sage: w.periods(divide_length=True)
[2]
sage: w = Word('ababa')
sage: w.periods()
[2, 4]
sage: w.periods(divide_length=True)
[]
"""
n = len(self)
if divide_length:
possible = (i for i in range(1,n) if n % i == 0)
else:
possible = range(1, n)
return [x for x in possible if self.has_period(x)]
def longest_common_subword(self, other):
r"""
Return a longest subword of ``self`` and ``other``.
A subword of a word is a subsequence of the word's letters, read in the
order in which they appear in the word.
For more information, see
:wikipedia:`Longest_common_subsequence_problem`.
INPUT:
- ``other`` -- a word
ALGORITHM:
For any indices `i,j`, we compute the longest common subword ``lcs[i,j]`` of
``self[:i]`` and ``other[:j]``. This can be easily obtained as the longest
of
- ``lcs[i-1,j]``
- ``lcs[i,j-1]``
- ``lcs[i-1,j-1]+self[i]`` if ``self[i]==other[j]``
EXAMPLES::
sage: v1 = Word("abc")
sage: v2 = Word("ace")
sage: v1.longest_common_subword(v2)
word: ac
sage: w1 = Word("1010101010101010101010101010101010101010")
sage: w2 = Word("0011001100110011001100110011001100110011")
sage: w1.longest_common_subword(w2)
word: 00110011001100110011010101010
TESTS::
sage: Word().longest_common_subword(Word())
word:
.. SEEALSO::
:meth:`is_subword_of`
"""
from sage.combinat.words.word import Word
if len(self) == 0 or len(other) == 0:
return Word()
w2 = list(other)
# In order to avoid storing lcs[i,j] for each pair i,j of indices, we
# only store the lcs[i,j] for two consecutive values of i. At any step
# of the algorithm, lcs[i,j] is stored at lcs[0][j] and lcs[i-1,j] is
# stored at lcs[1][j]
# The weird +1 that follows exists to make sure that lcs[i,-1] returns
# the empty word.
lcs = [[[] for i in range(len(w2)+1)] for j in range(2)]
for i,l1 in enumerate(self):
for j,l2 in enumerate(other):
lcs[0][j] = max(lcs[0][j-1], lcs[1][j],
lcs[1][j-1] + ([l1] if l1==l2 else []),key=len)
# Maintaining the meaning of lcs for the next loop
lcs.pop(1)
lcs.insert(0,[[] for i in range(len(w2)+1)])
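# After the final pop/insert above, the row computed for the last letter of
# self sits at index 1; its entry -2 (the trailing empty slot at -1 only
# plays the role of lcs[i,-1]) holds the longest common subword of self and
# the whole of other.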
return Word(lcs[1][-2])
def is_subword_of(self, other):
r"""
Return ``True`` if ``self`` is a subword of ``other``, and ``False`` otherwise.
EXAMPLES::
sage: Word().is_subword_of(Word('123'))
True
sage: Word('123').is_subword_of(Word('3211333213233321'))
True
sage: Word('321').is_subword_of(Word('11122212112122133111222332'))
False
.. SEEALSO::
:meth:`longest_common_subword`
"""
its = iter(self)
try:
s = next(its)
for e in other:
if s == e:
s = next(its)
else:
return False
except StopIteration:
return True
def is_lyndon(self):
r"""
Return ``True`` if ``self`` is a Lyndon word, and ``False``
otherwise.
A *Lyndon word* is a non-empty word that is lexicographically
smaller than each of its proper suffixes (for the given order
on its alphabet). That is, `w` is a Lyndon word if `w` is non-empty
and for each factorization `w = uv` (with `u`, `v` both non-empty),
we have `w < v`.
Equivalently, `w` is a Lyndon word iff `w` is a non-empty word that is
lexicographically smaller than each of its proper conjugates for the
given order on its alphabet.
See for instance [1].
EXAMPLES::
sage: Word('123132133').is_lyndon()
True
sage: Word().is_lyndon()
False
sage: Word('122112').is_lyndon()
False
TESTS:
A sanity check: ``LyndonWords`` generates Lyndon words, so we
filter all words of length `n<10` on the alphabet [1,2,3] for
Lyndon words, and compare with the ``LyndonWords`` generator::
sage: for n in range(1,10):
....: lw1 = [w for w in Words([1,2,3], n) if w.is_lyndon()]
....: lw2 = LyndonWords(3,n)
....: if set(lw1) != set(lw2): print(False)
Filter all words of length 8 on the alphabet [c,a,b] for Lyndon
words, and compare with the :class:`LyndonWords` generator after
mapping [a,b,c] to [2,3,1]::
sage: lw = [w for w in Words('cab', 8) if w.is_lyndon()]
sage: phi = WordMorphism({'a':2,'b':3,'c':1})
sage: set(map(phi, lw)) == set(LyndonWords(3,8))
True
REFERENCES:
- [1] M. Lothaire, Combinatorics On Words, vol. 17 of Encyclopedia
of Mathematics and its Applications, Addison-Wesley, Reading,
Massachusetts, 1983.
"""
if self.is_empty():
return False
key = self.parent().sortkey_letters
n = self.length()
i, j = 0, 1
while j < n:
ki = key(self[i])
kj = key(self[j])
if ki == kj:
# increment i and j
i += 1
j += 1
elif ki < kj:
# reset i, increment j
i = 0
j += 1
else:
# we found the first word in the lyndon factorization;
return False
else:
return i == 0
def lyndon_factorization(self):
r"""
Return the Lyndon factorization of ``self``.
The *Lyndon factorization* of a finite word `w` is the unique
factorization of `w` as a non-increasing product of Lyndon words,
i.e., `w = l_1\cdots l_n` where each `l_i` is a Lyndon word and
`l_1\geq \cdots \geq l_n`. See for instance [1].
OUTPUT:
the list `[l_1, \ldots, l_n]` of factors obtained
EXAMPLES::
sage: Word('010010010001000').lyndon_factorization()
(01, 001, 001, 0001, 0, 0, 0)
sage: Words('10')('010010010001000').lyndon_factorization()
(0, 10010010001000)
sage: Word('abbababbaababba').lyndon_factorization()
(abb, ababb, aababb, a)
sage: Words('ba')('abbababbaababba').lyndon_factorization()
(a, bbababbaaba, bba)
sage: Word([1,2,1,3,1,2,1]).lyndon_factorization()
(1213, 12, 1)
TESTS::
sage: Words('01')('').lyndon_factorization()
()
sage: Word('01').lyndon_factorization()
(01)
sage: Words('10')('01').lyndon_factorization()
(0, 1)
sage: lynfac = Word('abbababbaababba').lyndon_factorization()
sage: [x.is_lyndon() for x in lynfac]
[True, True, True, True]
sage: lynfac = Words('ba')('abbababbaababba').lyndon_factorization()
sage: [x.is_lyndon() for x in lynfac]
[True, True, True]
sage: w = words.ThueMorseWord()[:1000]
sage: w == prod(w.lyndon_factorization())
True
REFERENCES:
- [1] J.-P. Duval, Factorizing words over an ordered alphabet,
J. Algorithms 4 (1983) 363--381.
- [2] G. Melancon, Factorizing infinite words using Maple,
MapleTech journal, vol. 4, no. 1, 1997, pp. 34-42.
"""
key = self.parent().sortkey_letters
# We compute the indexes of the factorization.
n = self.length()
k = -1
F = [0]
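# Duval's algorithm: k is the position of the last letter already cut into
# factors; i and j scan ahead, with j - i the period of the current run.
# An equal letter at j extends the periodic run, a larger letter at j makes
# the whole scanned prefix a single Lyndon candidate again (i is reset to
# k+1), and a smaller letter at j (or the end of the word) cuts out the
# Lyndon word of length j - i as many times as it fully fits.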
while k < n-1:
i = k+1
j = k+2
while j < n:
ki = key(self[i])
kj = key(self[j])
if ki < kj:
i = k+1
j += 1
elif ki == kj:
i += 1
j += 1
else:
break
while k < i:
F.append(k + j - i + 1)
k = k + j - i
return Factorization([self[F[i]:F[i+1]] for i in range(len(F)-1)])
def inversions(self):
r"""
Return a list of the inversions of ``self``. An inversion is a pair
`(i,j)` of non-negative integers `i < j` such that ``self[i] > self[j]``.
EXAMPLES::
sage: Word([1,2,3,2,2,1]).inversions()
[[1, 5], [2, 3], [2, 4], [2, 5], [3, 5], [4, 5]]
sage: Words([3,2,1])([1,2,3,2,2,1]).inversions()
[[0, 1], [0, 2], [0, 3], [0, 4], [1, 2]]
sage: Word('abbaba').inversions()
[[1, 3], [1, 5], [2, 3], [2, 5], [4, 5]]
sage: Words('ba')('abbaba').inversions()
[[0, 1], [0, 2], [0, 4], [3, 4]]
"""
inversion_list = []
cmp_key = self._parent.sortkey_letters
for (i1, letter1) in enumerate(self):
k1 = cmp_key(letter1)
for (i2, letter2) in enumerate(self[i1 + 1:]):
k2 = cmp_key(letter2)
if k1 > k2:
inversion_list.append([i1, i1 + i2 + 1])
return inversion_list
# TODO: This function should be defined for words of integers, but it
# naturally is defined over an alphabet with a rank function....
def degree(self, weights=None):
r"""
Return the weighted degree of ``self``, where the weighted degree of
each letter in the ordered alphabet is given by ``weights``, which
defaults to ``[1, 2, 3, ...]``.
INPUT:
- ``weights`` -- a list or a tuple, or a dictionary keyed by the
letters occurring in ``self``.
EXAMPLES::
sage: Word([1,2,3]).degree()
6
sage: Word([3,2,1]).degree()
6
sage: Words("ab")("abba").degree()
6
sage: Words("ab")("abba").degree([0,2])
4
sage: Words("ab")("abba").degree([-1,-1])
-4
sage: Words("ab")("aabba").degree([1,1])
5
sage: Words([1,2,4])([1,2,4]).degree()
6
sage: Word([1,2,4]).degree()
7
sage: Word("aabba").degree({'a':1,'b':2})
7
sage: Word([0,1,0]).degree({0:17,1:0})
34
"""
if isinstance(weights, dict):
deg = 0
for a in self:
deg += weights[a]
return deg
if hasattr(self._parent._alphabet, "rank"):
rank_fcn = self._parent._alphabet.rank
deg = 0
if weights is None:
rank = {}
for a in self:
if a not in rank:
rank[a] = rank_fcn(a)
deg += rank[a]+1
elif isinstance(weights, (list,tuple)):
rank = {}
for a in self:
if a not in rank:
rank[a] = rank_fcn(a)
deg += weights[rank[a]]
return deg
if all(x in ZZ for x in self):
return sum(self)
raise TypeError("degree is not defined for your word")
def deg_lex_less(self, other, weights=None):
r"""
Return ``True`` if ``self`` is degree lexicographically less than ``other``,
and ``False`` otherwise. The weight of each letter in the ordered
alphabet is given by ``weights``, which defaults to ``[1, 2, 3, ...]``.
EXAMPLES::
sage: Word([1,2,3]).deg_lex_less(Word([1,3,2]))
True
sage: Word([3,2,1]).deg_lex_less(Word([1,2,3]))
False
sage: W = Words(range(5))
sage: W([1,2,4]).deg_lex_less(W([1,3,2]))
False
sage: Word("abba").deg_lex_less(Word("abbb"), dict(a=1,b=2))
True
sage: Word("abba").deg_lex_less(Word("baba"), dict(a=1,b=2))
True
sage: Word("abba").deg_lex_less(Word("aaba"), dict(a=1,b=2))
False
sage: Word("abba").deg_lex_less(Word("aaba"), dict(a=1,b=0))
True
"""
deg_self = self.degree(weights)
deg_other = other.degree(weights)
if deg_self != deg_other:
return deg_self < deg_other
return self.lex_less(other)
def inv_lex_less(self, other):
r"""
Return ``True`` if ``self`` is inverse lexicographically less than ``other``.
EXAMPLES::
sage: Word([1,2,4]).inv_lex_less(Word([1,3,2]))
False
sage: Word([3,2,1]).inv_lex_less(Word([1,2,3]))
True
"""
if self.length() != len(other):
return self.length() < len(other)
return self.reversal() < other.reversal()
def deg_inv_lex_less(self, other, weights=None):
r"""
Return ``True`` if the word ``self`` is degree inverse lexicographically
less than ``other``.
EXAMPLES::
sage: Word([1,2,4]).deg_inv_lex_less(Word([1,3,2]))
False
sage: Word([3,2,1]).deg_inv_lex_less(Word([1,2,3]))
True
"""
d1 = self.degree(weights)
d2 = other.degree(weights)
if d1 != d2:
return d1 < d2
return self.inv_lex_less(other)
def rev_lex_less(self, other):
r"""
Return ``True`` if the word ``self`` is reverse
lexicographically less than ``other``.
EXAMPLES::
sage: Word([1,2,4]).rev_lex_less(Word([1,3,2]))
True
sage: Word([3,2,1]).rev_lex_less(Word([1,2,3]))
False
"""
if self.length() != len(other):
return self.length() > len(other)
return self.reversal() > other.reversal()
def deg_rev_lex_less(self, other, weights=None):
r"""
Return ``True`` if ``self`` is degree reverse
lexicographically less than ``other``.
EXAMPLES::
sage: Word([3,2,1]).deg_rev_lex_less(Word([1,2,3]))
False
sage: Word([1,2,4]).deg_rev_lex_less(Word([1,3,2]))
False
sage: Word([1,2,3]).deg_rev_lex_less(Word([1,2,4]))
True
"""
d1 = self.degree(weights)
d2 = other.degree(weights)
if d1 != d2:
return d1 < d2
return self.rev_lex_less(other)
@cached_method
def last_position_dict(self):
r"""
Return a dictionary that contains the last position of each letter
in ``self``.
EXAMPLES::
sage: Word('1231232').last_position_dict()
{'1': 3, '2': 6, '3': 5}
"""
d = {}
for (i, letter) in enumerate(self):
d[letter] = i
return d
def _pos_in(self, other, p):
r"""
Return the position of the first occurrence of ``self`` starting at
position ``p`` in ``other``.
EXAMPLES::
sage: Word('12')._pos_in(Word('131231'), 2)
2
sage: Word('12')._pos_in(Word('131231'), 3) is None
True
sage: Word('32')._pos_in(Word('131231'), 0) is None
True
The empty word occurs in a word::
sage: Word('')._pos_in(Word('123'), 0)
0
sage: Word('')._pos_in(Word(''), 0)
0
"""
lf = self.length()
lm = other.length()
if lf == 0:
return p
elif lm == 0:
return None
occ = self.last_position_dict()
suff = self.good_suffix_table()
s = p
while s <= lm - lf:
for j in range(lf-1, -1, -1):
a = other[s+j]
if self[j] != a :
s += max(suff[j + 1], j - occ.get(a,-1))
break
else:
return s
return None
def first_pos_in(self, other):
r"""
Return the position of the first occurrence of ``self`` in ``other``,
or ``None`` if ``self`` is not a factor of ``other``.
EXAMPLES::
sage: Word('12').first_pos_in(Word('131231'))
2
sage: Word('32').first_pos_in(Word('131231')) is None
True
"""
return self._pos_in(other, 0)
def find(self, sub, start=0, end=None):
r"""
Return the index of the first occurrence of ``sub`` in ``self``,
such that ``sub`` is contained within ``self[start:end]``.
Return ``-1`` on failure.
INPUT:
- ``sub`` -- string, list, tuple or word to search for.
- ``start`` -- non-negative integer (default: ``0``) specifying
the position from which to start the search.
- ``end`` -- non-negative integer (default: ``None``) specifying
the position at which the search must stop. If ``None``, then
the search is performed up to the end of the string.
OUTPUT:
a non-negative integer or ``-1``
EXAMPLES::
sage: w = Word([0,1,0,0,1])
sage: w.find(Word([1,0]))
1
The ``sub`` argument can also be a tuple or a list::
sage: w.find([1,0])
1
sage: w.find((1,0))
1
Examples using ``start`` and ``end``::
sage: w.find(Word([0,1]), start=1)
3
sage: w.find(Word([0,1]), start=1, end=5)
3
sage: w.find(Word([0,1]), start=1, end=4) == -1
True
sage: w.find(Word([1,1])) == -1
True
sage: w.find("aa")
-1
Instances of ``Word_str`` handle string inputs as well::
sage: w = Word('abac')
sage: w.find('a')
0
sage: w.find('ba')
1
TESTS:
Check that :trac:`12804` is fixed::
sage: w = Word(iter("ababab"), length="finite")
sage: w.find("ab")
0
sage: w.find("ab", start=1)
2
sage: w.find("aa")
-1
sage: w.find("abc")
-1
sage: w = Words('ab')(tuple('babaabaaab'))
sage: w.find('abc')
-1
"""
if not isinstance(sub, FiniteWord_class):
try:
sub = self.parent()(sub)
except (ValueError,TypeError):
return -1
p = sub.first_pos_in(self[start:end])
return -1 if p is None else p+start
def rfind(self, sub, start=0, end=None):
r"""
Return the index of the last occurrence of ``sub`` in ``self``,
such that ``sub`` is contained within ``self[start:end]``.
Return ``-1`` on failure.
INPUT:
- ``sub`` -- string, list, tuple or word to search for.
- ``start`` -- non-negative integer (default: ``0``) specifying
the position at which the backward search must stop.
- ``end`` -- non-negative integer (default: ``None``) specifying
the position from which the backward search starts. If ``None``, then
the search starts from the end of the word.
OUTPUT:
a non-negative integer or ``-1``
EXAMPLES::
sage: w = Word([0,1,0,0,1])
sage: w.rfind(Word([0,1]))
3
The ``sub`` parameter can also be a list or a tuple::
sage: w.rfind([0,1])
3
sage: w.rfind((0,1))
3
Examples using the argument ``start`` and ``end``::
sage: w.rfind(Word([0,1]), end=4)
0
sage: w.rfind(Word([0,1]), end=5)
3
sage: w.rfind(Word([0,0]), start=2, end=5)
2
sage: w.rfind(Word([0,0]), start=3, end=5)
-1
Instances of ``Word_str`` handle string inputs as well::
sage: w = Word('abac')
sage: w.rfind('a')
2
sage: w.rfind(Word('a'))
2
sage: w.rfind([0,1])
-1
TESTS:
Check that :trac:`12804` is fixed::
sage: w = Word(iter("abab"), length="finite")
sage: w.rfind("ab")
2
sage: w.rfind("ab", end=3)
0
sage: w.rfind("aa")
-1
sage: w.rfind([0,0,0])
-1
"""
if not isinstance(sub, FiniteWord_class):
try:
sub = self.parent()(sub)
except (ValueError,TypeError):
return -1
L = len(sub)
start = max(0, int(start))
if end is None:
i = len(self) - L
else:
i = min(end, len(self)) - L
while i >= start:
if self[i:i+L] == sub: return i
i -= 1
return -1
def is_factor(self, other):
r"""
Return ``True`` if ``self`` is a factor of ``other``, and ``False`` otherwise.
EXAMPLES::
sage: u = Word('2113')
sage: w = Word('123121332131233121132123')
sage: u.is_factor(w)
True
sage: u = Word('321')
sage: w = Word('1231241231312312312')
sage: u.is_factor(w)
False
The empty word is a factor of every word::
sage: Word().is_factor(Word())
True
sage: Word().is_factor(Word('a'))
True
sage: Word().is_factor(Word([1,2,3]))
True
sage: Word().is_factor(Word(lambda n:n, length=5))
True
"""
return self.first_pos_in(other) is not None
def factor_occurrences_in(self, other):
r"""
Return an iterator over all occurrences (including overlapping ones)
of ``self`` in ``other`` in their order of appearance.
EXAMPLES::
sage: u = Word('121')
sage: w = Word('121213211213')
sage: list(u.factor_occurrences_in(w))
[0, 2, 8]
"""
if self.length() == 0:
raise NotImplementedError("The factor must be non empty")
p = self._pos_in(other, 0)
while p is not None:
yield p
p = self._pos_in(other, p+1)
def nb_factor_occurrences_in(self, other):
r"""
Return the number of times ``self`` appears as a factor
in ``other``.
EXAMPLES::
sage: Word().nb_factor_occurrences_in(Word('123'))
Traceback (most recent call last):
...
NotImplementedError: The factor must be non empty
sage: Word('123').nb_factor_occurrences_in(Word('112332312313112332121123'))
4
sage: Word('321').nb_factor_occurrences_in(Word('11233231231311233221123'))
0
"""
n = 0
for _ in self.factor_occurrences_in(other):
n += 1
return n
def nb_subword_occurrences_in(self, other):
r"""
Return the number of times ``self`` appears in ``other`` as a subword.
This corresponds to the notion of `binomial coefficient` of two
finite words whose properties are presented in the chapter of
Lothaire's book written by Sakarovitch and Simon [1].
INPUT:
- ``other`` -- finite word
EXAMPLES::
sage: tm = words.ThueMorseWord()
sage: u = Word([0,1,0,1])
sage: u.nb_subword_occurrences_in(tm[:1000])
2604124996
sage: u = Word([0,1,0,1,1,0])
sage: u.nb_subword_occurrences_in(tm[:100])
20370432
.. NOTE::
This code, based on [2], actually computes the number of
occurrences of all prefixes of ``self`` as subwords in all
prefixes of ``other``. In particular, its complexity is
bounded by ``len(self) * len(other)``.
TESTS::
sage: Word('').nb_subword_occurrences_in(Word(''))
1
sage: parent(_)
Integer Ring
sage: v,u = Word(), Word('123')
sage: v.nb_subword_occurrences_in(u)
1
sage: v,u = Word('123'), Word('1133432311132311112')
sage: v.nb_subword_occurrences_in(u)
11
sage: v,u = Word('4321'), Word('1132231112233212342231112')
sage: v.nb_subword_occurrences_in(u)
0
sage: v,u = Word('3'), Word('122332112321213')
sage: v.nb_subword_occurrences_in(u)
4
sage: v,u = Word([]), words.ThueMorseWord()[:1000]
sage: v.nb_subword_occurrences_in(u)
1
REFERENCES:
- [1] M. Lothaire, Combinatorics on Words, Cambridge University
Press, (1997).
- [2] Mateescu, A., Salomaa, A., Salomaa, K. and Yu, S., A
sharpening of the Parikh mapping. Theoret. Informatics Appl. 35
(2001) 551-564.
"""
# record the position of letters in self
pos = defaultdict(list)
for i,a in enumerate(self):
pos[a].append(i)
for a in pos:
pos[a].reverse()
# compute the occurrences of all prefixes of self as subwords in other
occ = [ZZ.zero()] * (len(self)+1)
occ[0] = ZZ.one()
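# occ[i] is the number of occurrences of self[:i] as a subword of the part
# of other read so far. Reading a letter a of other extends every occurrence
# of self[:i] with self[i] == a to an occurrence of self[:i+1]; positions
# are visited in decreasing order so an occurrence is extended at most once
# per letter of other.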
for a in other:
for i in pos[a]:
occ[i+1] += occ[i]
# return only the number of occurrences of self
return occ[-1]
def _return_words_list(self, fact):
r"""
Return the return words as a list in the order they appear in the word.
INPUT:
- ``fact`` -- a non-empty finite word
OUTPUT:
a Python list of finite words
TESTS::
sage: Word('baccabccbacbca')._return_words_list(Word('b'))
[word: bacca, word: bcc, word: bac]
"""
return list(self.return_words_iterator(fact))
def return_words(self, fact):
r"""
Return the set of return words of ``fact`` in ``self``.
This is the set of all factors starting with the given factor and ending
just before the next occurrence of this factor. See [1] and [2].
INPUT:
- ``fact`` -- a non-empty finite word
OUTPUT:
a Python set of finite words
EXAMPLES::
sage: Word('21331233213231').return_words(Word('2'))
{word: 213, word: 21331, word: 233}
sage: Word().return_words(Word('213'))
set()
sage: Word('121212').return_words(Word('1212'))
{word: 12}
::
sage: TM = words.ThueMorseWord()[:1000]
sage: sorted(TM.return_words(Word([0])))
[word: 0, word: 01, word: 011]
REFERENCES:
- [1] F. Durand, A characterization of substitutive sequences using
return words, Discrete Math. 179 (1998) 89-101.
- [2] C. Holton, L.Q. Zamboni, Descendants of primitive substitutions,
Theory Comput. Syst. 32 (1999) 133-157.
"""
return set(self.return_words_iterator(fact))
def complete_return_words(self, fact):
r"""
Return the set of complete return words of ``fact`` in ``self``.
This is the set of all factors starting with the given factor and ending
just after the next occurrence of this factor. See for instance [1].
INPUT:
- ``fact`` -- a non-empty finite word
OUTPUT:
a Python set of finite words
EXAMPLES::
sage: s = Word('21331233213231').complete_return_words(Word('2'))
sage: sorted(s)
[word: 2132, word: 213312, word: 2332]
sage: Word('').complete_return_words(Word('213'))
set()
sage: Word('121212').complete_return_words(Word('1212'))
{word: 121212}
REFERENCES:
- [1] J. Justin, L. Vuillon, Return words in Sturmian and
episturmian words, Theor. Inform. Appl. 34 (2000) 343--356.
"""
return set(self.complete_return_words_iterator(fact))
def return_words_derivate(self, fact):
r"""
Return the word obtained by replacing each occurrence of a return word
of the given factor by a letter identifying it (letters are assigned in
order of first appearance), dropping any dangling prefix and suffix.
See for instance [1].
EXAMPLES::
sage: Word('12131221312313122').return_words_derivate(Word('1'))
word: 123242
REFERENCES:
- [1] F. Durand, A characterization of substitutive sequences using
return words, Discrete Math. 179 (1998) 89--101.
"""
idx = 0
tab = {}
ret = [tab.setdefault(w, len(tab)) + 1 for w in self._return_words_list(fact)]
from sage.combinat.words.word import Word
return Word(ret)
def is_quasiperiodic(self):
r"""
Return ``True`` if ``self`` is quasiperiodic, and ``False`` otherwise.
A finite or infinite word `w` is *quasiperiodic* if it can be
constructed by concatenations and superpositions of one of its proper
factors `u`, which is called a *quasiperiod* of `w`.
See for instance [1], [2], and [3].
EXAMPLES::
sage: Word('abaababaabaababaaba').is_quasiperiodic()
True
sage: Word('abacaba').is_quasiperiodic()
False
sage: Word('a').is_quasiperiodic()
False
sage: Word().is_quasiperiodic()
False
sage: Word('abaaba').is_quasiperiodic()
True
REFERENCES:
- [1] A. Apostolico, A. Ehrenfeucht, Efficient detection of
quasiperiodicities in strings, Theoret. Comput. Sci. 119 (1993)
247--265.
- [2] S. Marcus, Quasiperiodic infinite words, Bull. Eur. Assoc.
Theor. Comput. Sci. 82 (2004) 170-174.
- [3] A. Glen, F. Levé, G. Richomme, Quasiperiodic and Lyndon
episturmian words, Preprint, 2008, arXiv:0805.0730.
"""
l = self.length()
if l <= 1:
return False
for i in range(1, l - 1):
return_lengths = [x.length() for x in self.return_words(self[:i])]
if return_lengths != []:
if (max(return_lengths) <= i and self[l-i:l] == self[:i]):
return True
return False
def quasiperiods(self):
r"""
Return the quasiperiods of ``self`` as a list ordered from shortest to
longest.
Let `w` be a finite or infinite word. A *quasiperiod* of `w` is a
proper factor `u` of `w` such that the occurrences of `u` in `w`
entirely cover `w`, i.e., every position of `w` falls within some
occurrence of `u` in `w`. See for instance [1], [2], and [3].
EXAMPLES::
sage: Word('abaababaabaababaaba').quasiperiods()
[word: aba, word: abaaba, word: abaababaaba]
sage: Word('abaaba').quasiperiods()
[word: aba]
sage: Word('abacaba').quasiperiods()
[]
REFERENCES:
- [1] A. Apostolico, A. Ehrenfeucht, Efficient detection of
quasiperiodicities in strings, Theoret. Comput. Sci. 119 (1993)
247--265.
- [2] S. Marcus, Quasiperiodic infinite words, Bull. Eur. Assoc.
Theor. Comput. Sci. 82 (2004) 170-174.
- [3] A. Glen, F. Levé, G. Richomme, Quasiperiodic and Lyndon
episturmian words, Preprint, 2008, arXiv:0805.0730.
"""
l = self.length()
if l <= 1:
return []
Q = []
for i in range(1, l - 1):
return_lengths = [x.length() for x in self.return_words(self[:i])]
if return_lengths != []:
if (max(return_lengths) <= i and self[l-i:l] == self[:i]):
Q.append(self[:i])
return Q
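# --- Illustrative sketch (not part of the Sage source) ----------------------
# A brute-force plain-string check of the definition of a quasiperiod: a
# proper prefix u is a quasiperiod when the occurrences of u cover every
# position of the word.  Quadratic but transparent; for illustration only.
def _quasiperiods_sketch(w):
    result = []
    for i in range(1, len(w)):
        u = w[:i]
        covered = set()
        for j in range(len(w) - i + 1):
            if w[j:j + i] == u:
                covered.update(range(j, j + i))
        if len(covered) == len(w):
            result.append(u)
    return result
# Example: _quasiperiods_sketch('abaababaabaababaaba')
#          == ['aba', 'abaaba', 'abaababaaba']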
def crochemore_factorization(self):
r"""
Return the Crochemore factorization of ``self`` as an ordered list of
factors.
The *Crochemore factorization* of a finite word `w` is the unique
factorization: `(x_1, x_2, \ldots, x_n)` of `w` with each `x_i`
satisfying either:
C1. `x_i` is a letter that does not appear in `u = x_1\ldots x_{i-1}`;
C2. `x_i` is the longest prefix of `v = x_i\ldots x_n` that also
has an occurrence beginning within `u = x_1\ldots x_{i-1}`. See [1].
.. note::
This is not a very good implementation, and should be improved.
EXAMPLES::
sage: x = Word('abababb')
sage: x.crochemore_factorization()
(a, b, abab, b)
sage: mul(x.crochemore_factorization()) == x
True
sage: y = Word('abaababacabba')
sage: y.crochemore_factorization()
(a, b, a, aba, ba, c, ab, ba)
sage: mul(y.crochemore_factorization()) == y
True
sage: x = Word([0,1,0,1,0,1,1])
sage: x.crochemore_factorization()
(0, 1, 0101, 1)
sage: mul(x.crochemore_factorization()) == x
True
REFERENCES:
- [1] M. Crochemore, Recherche linéaire d'un carré dans un mot,
C. R. Acad. Sci. Paris Sér. I Math. 296 (1983), no. 14, 781--784.
"""
c = Factorization([self[:1]])
u = self[:sum(map(len,c))] # = x_1 ... x_{i-1}
v = self[sum(map(len,c)):] # = x_i ... x_n
while v:
# C1. x_i is a letter that does not appear in u = x_1...x_{i-1}
if v[0] not in u:
c.append(v[:1])
else:
# C2. x_i is the longest prefix of v = x_i...x_n that also has an
# occurrence beginning within u = x_1...x_{i-1}.
xi = v
while True:
if xi.first_pos_in(self) < u.length():
c.append(xi)
break
else:
xi = xi[:-1]
u = self[:sum(map(len,c))] # = x_1 ... x_{i-1}
v = self[sum(map(len,c)):] # = x_i ... x_n
return c
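# --- Illustrative sketch (not part of the Sage source) ----------------------
# A straightforward quadratic plain-string version of the Crochemore
# factorization, mirroring rules C1/C2 of the docstring above.  For
# illustration only.
def _crochemore_factorization_sketch(w):
    factors = []
    i = 0
    while i < len(w):
        if w[i] not in w[:i]:
            # C1: a letter that has not appeared before
            factors.append(w[i])
            i += 1
            continue
        # C2: longest prefix of w[i:] whose first occurrence starts before i
        j = len(w)
        while w.find(w[i:j]) >= i:
            j -= 1
        factors.append(w[i:j])
        i = j
    return factors
# Example: _crochemore_factorization_sketch('abababb') == ['a', 'b', 'abab', 'b']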
def evaluation_dict(self):
r"""
Return a dictionary keyed by the letters occurring in ``self`` with
values the number of occurrences of the letter.
EXAMPLES::
sage: Word([2,1,4,2,3,4,2]).evaluation_dict()
{1: 1, 2: 3, 3: 1, 4: 2}
sage: Word('badbcdb').evaluation_dict()
{'a': 1, 'b': 3, 'c': 1, 'd': 2}
sage: Word().evaluation_dict()
{}
::
sage: Word('1213121').evaluation_dict() # random order of keys
{'1': 4, '2': 2, '3': 1}
TESTS::
sage: f = Word('1213121').evaluation_dict()
sage: f['1'] == 4
True
sage: f['2'] == 2
True
sage: f['3'] == 1
True
"""
return evaluation_dict(self)
def evaluation_sparse(self):
r"""
Return a list representing the evaluation of ``self``. The entries of
the list are two-element lists ``[a, n]``, where ``a`` is a letter
occurring in ``self`` and ``n`` is the number of occurrences of ``a`` in ``self``.
EXAMPLES::
sage: Word([4,4,2,5,2,1,4,1]).evaluation_sparse()
[(1, 2), (2, 2), (4, 3), (5, 1)]
sage: Word("abcaccab").evaluation_sparse()
[('a', 3), ('c', 3), ('b', 2)]
"""
return self.evaluation_dict().items()
def evaluation_partition(self):
r"""
Return the evaluation of the word w as a partition.
EXAMPLES::
sage: Word("acdabda").evaluation_partition()
[3, 2, 1, 1]
sage: Word([2,1,4,2,3,4,2]).evaluation_partition()
[3, 2, 1, 1]
"""
p = sorted(self.evaluation_dict().values(), reverse=True)
from sage.combinat.partition import Partition
if 0 in p:
return Partition(p[:p.index(0)])
else:
return Partition(p)
def overlap_partition(self, other, delay=0, p=None, involution=None) :
r"""
Return the partition of the alphabet induced by the overlap of
``self`` and ``other`` with the given ``delay``.
The partition of the alphabet is given by the equivalence
relation obtained from the symmetric, reflexive and transitive
closure of the set of pairs of letters
`R_{u,v,d} = \{ (u_k, v_{k-d}) : 0 \leq k < n, 0\leq k-d < m \}`
where `u = u_0 u_1 \cdots u_{n-1}`, `v = v_0v_1\cdots v_{m-1}` are
two words on the alphabet `A` and `d` is an integer.
The equivalence relation defined by `R` is inspired from [1].
INPUT:
- ``other`` -- word on the same alphabet as ``self``
- ``delay`` -- integer (default: ``0``)
- ``p`` -- disjoint sets data structure (optional, default: ``None``),
a partition of the alphabet into disjoint sets to start with.
If ``None``, each letter start in distinct equivalence classes.
- ``involution`` -- callable (optional, default: ``None``), an
involution on the alphabet. If ``involution`` is not ``None``, the relation
`R_{u,v,d} \cup R_{involution(u),involution(v),d}` is considered.
OUTPUT:
a disjoint set data structure
EXAMPLES::
sage: W = Words(list('abc') + list(range(6)))
sage: u = W('abc')
sage: v = W(range(5))
sage: u.overlap_partition(v)
{{0, 'a'}, {1, 'b'}, {2, 'c'}, {3}, {4}, {5}}
sage: u.overlap_partition(v, 2)
{{'a'}, {'b'}, {0, 'c'}, {1}, {2}, {3}, {4}, {5}}
sage: u.overlap_partition(v, -1)
{{0}, {1, 'a'}, {2, 'b'}, {3, 'c'}, {4}, {5}}
You can re-use the same disjoint set and do more than one overlap::
sage: p = u.overlap_partition(v, 2)
sage: p
{{'a'}, {'b'}, {0, 'c'}, {1}, {2}, {3}, {4}, {5}}
sage: u.overlap_partition(v, 1, p)
{{'a'}, {0, 1, 'b', 'c'}, {2}, {3}, {4}, {5}}
The function ``overlap_partition`` can be used to study equations
on words. For example, if a word `w` overlaps itself with delay `d`, then
`d` is a period of `w`::
sage: W = Words(range(20))
sage: w = W(range(14)); w
word: 0,1,2,3,4,5,6,7,8,9,10,11,12,13
sage: d = 5
sage: p = w.overlap_partition(w, d)
sage: m = WordMorphism(p.element_to_root_dict())
sage: w2 = m(w); w2
word: 56789567895678
sage: w2.minimal_period() == d
True
If a word is equal to its reversal, then it is a palindrome::
sage: W = Words(range(20))
sage: w = W(range(17)); w
word: 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16
sage: p = w.overlap_partition(w.reversal(), 0)
sage: m = WordMorphism(p.element_to_root_dict())
sage: w2 = m(w); w2
word: 01234567876543210
sage: w2.parent()
Finite words over {0, 1, 2, 3, 4, 5, 6, 7, 8, 17, 18, 19}
sage: w2.is_palindrome()
True
If the reversal of a word `w` is factor of its square `w^2`, then
`w` is symmetric, i.e. the product of two palindromes::
sage: W = Words(range(10))
sage: w = W(range(10)); w
word: 0123456789
sage: p = (w*w).overlap_partition(w.reversal(), 4)
sage: m = WordMorphism(p.element_to_root_dict())
sage: w2 = m(w); w2
word: 0110456654
sage: w2.is_symmetric()
True
If the image of the reversal of a word `w` under an involution `f`
is factor of its square `w^2`, then `w` is `f`-symmetric::
sage: W = Words([-11,-9,..,11])
sage: w = W([1,3,..,11])
sage: w
word: 1,3,5,7,9,11
sage: inv = lambda x:-x
sage: f = WordMorphism(dict( (a, inv(a)) for a in W.alphabet()))
sage: p = (w*w).overlap_partition(f(w).reversal(), 2, involution=f)
sage: m = WordMorphism(p.element_to_root_dict())
sage: m(w)
word: 1,-1,5,7,-7,-5
sage: m(w).is_symmetric(f)
True
TESTS::
sage: W = Words('abcdef')
sage: w = W('abc')
sage: y = W('def')
sage: w.overlap_partition(y, -3)
{{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}}
sage: w.overlap_partition(y, -2)
{{'a', 'f'}, {'b'}, {'c'}, {'d'}, {'e'}}
sage: w.overlap_partition(y, -1)
{{'a', 'e'}, {'b', 'f'}, {'c'}, {'d'}}
sage: w.overlap_partition(y, 0)
{{'a', 'd'}, {'b', 'e'}, {'c', 'f'}}
sage: w.overlap_partition(y, 1)
{{'a'}, {'b', 'd'}, {'c', 'e'}, {'f'}}
sage: w.overlap_partition(y, 2)
{{'a'}, {'b'}, {'c', 'd'}, {'e'}, {'f'}}
sage: w.overlap_partition(y, 3)
{{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}}
sage: w.overlap_partition(y, 4)
{{'a'}, {'b'}, {'c'}, {'d'}, {'e'}, {'f'}}
::
sage: W = Words(range(2))
sage: w = W([0,1,0,1,0,1]); w
word: 010101
sage: w.overlap_partition(w, 0)
{{0}, {1}}
sage: w.overlap_partition(w, 1)
{{0, 1}}
::
sage: empty = Word()
sage: empty.overlap_partition(empty, 'yo')
Traceback (most recent call last):
...
TypeError: delay (=yo) must be an integer
sage: empty.overlap_partition(empty,2,'yo')
Traceback (most recent call last):
...
TypeError: p(=yo) is not a DisjointSet
The ``involution`` input can be any callable::
sage: w = Words([-5,..,5])([-5..5])
sage: inv = lambda x:-x
sage: w.overlap_partition(w, 2, involution=inv)
{{-4, -2, 0, 2, 4}, {-5, -3, -1, 1, 3, 5}}
REFERENCES:
- [1] S. Labbé, Propriétés combinatoires des `f`-palindromes,
Mémoire de maîtrise en Mathématiques, Montréal, UQAM, 2008,
109 pages.
"""
if not isinstance(delay, (int, Integer)):
raise TypeError("delay (=%s) must be an integer"%delay)
elif delay < 0:
return other.overlap_partition(self, -delay, p)
from sage.sets.disjoint_set import DisjointSet_class
if p is None:
if self.parent().alphabet().cardinality() is Infinity:
raise ValueError("The alphabet of the parent must be finite")
from sage.sets.disjoint_set import DisjointSet
p = DisjointSet(self.parent().alphabet())
elif not isinstance(p, DisjointSet_class):
raise TypeError("p(=%s) is not a DisjointSet" % p)
#Join the classes of each pair of letters that are one above the other
from sage.combinat.words.morphism import WordMorphism
S = zip(islice(self, delay, None), other)
if involution is None:
for (a,b) in S:
p.union(a, b)
elif isinstance(involution, WordMorphism):
for (a,b) in S:
p.union(a, b)
# take the first letter of the word
p.union(involution(a)[0], involution(b)[0])
elif callable(involution):
for (a,b) in S:
p.union(a, b)
p.union(involution(a), involution(b))
else:
raise TypeError("involution (=%s) must be callable"%involution)
return p
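# --- Illustrative sketch (not part of the Sage source) ----------------------
# The overlap partition on plain strings, with Sage's DisjointSet replaced by
# a small dictionary-based union-find.  Only the basic case (no involution,
# no starting partition) is sketched; names are made up for illustration.
def _overlap_partition_sketch(u, v, delay=0):
    if delay < 0:
        return _overlap_partition_sketch(v, u, -delay)
    parent = {a: a for a in set(u) | set(v)}
    def find(a):
        while parent[a] != a:
            parent[a] = parent[parent[a]]
            a = parent[a]
        return a
    def union(a, b):
        parent[find(a)] = find(b)
    # join the classes of each pair of letters that are one above the other
    for a, b in zip(u[delay:], v):
        union(a, b)
    classes = {}
    for a in parent:
        classes.setdefault(find(a), set()).add(a)
    return {frozenset(c) for c in classes.values()}
# Example: _overlap_partition_sketch('abc', 'def', 0)
#          == {frozenset('ad'), frozenset('be'), frozenset('cf')}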
# TODO: requires a parent with a sortkey_letters method
def standard_permutation(self):
r"""
Return the standard permutation of the word
``self`` on the ordered alphabet. It is defined as
the permutation with exactly the same number of
inversions as w. Equivalently, it is the permutation
of minimal length whose inverse sorts ``self``.
EXAMPLES::
sage: w = Word([1,2,3,2,2,1]); w
word: 123221
sage: p = w.standard_permutation(); p
[1, 3, 6, 4, 5, 2]
sage: v = Word(p.inverse().action(w)); v
word: 112223
sage: list(filter(lambda q: q.length() <= p.length() and \
....: q.inverse().action(w) == list(v), \
....: Permutations(w.length()) ))
[[1, 3, 6, 4, 5, 2]]
::
sage: w = Words([1,2,3])([1,2,3,2,2,1,2,1]); w
word: 12322121
sage: p = w.standard_permutation(); p
[1, 4, 8, 5, 6, 2, 7, 3]
sage: Word(p.inverse().action(w))
word: 11122223
::
sage: w = Words([3,2,1])([1,2,3,2,2,1,2,1]); w
word: 12322121
sage: p = w.standard_permutation(); p
[6, 2, 1, 3, 4, 7, 5, 8]
sage: Word(p.inverse().action(w))
word: 32222111
::
sage: w = Words('ab')('abbaba'); w
word: abbaba
sage: p = w.standard_permutation(); p
[1, 4, 5, 2, 6, 3]
sage: Word(p.inverse().action(w))
word: aaabbb
::
sage: w = Words('ba')('abbaba'); w
word: abbaba
sage: p = w.standard_permutation(); p
[4, 1, 2, 5, 3, 6]
sage: Word(p.inverse().action(w))
word: bbbaaa
"""
from sage.combinat.permutation import to_standard
return to_standard(self, key=self.parent().sortkey_letters)
def _s(self, i):
r"""
Implement Lascoux and Schützenberger `s_i` operator, swap the
number of ``i`` and ``i+1`` in a word.
EXAMPLES::
sage: w = Word([1,1,2,1,2,2,1,3,1,2])
sage: w._s(1)
word: 1221221312
TESTS::
sage: w = Word([])
sage: w._s(1)
word:
sage: w = Words(3)([2,1,2])
sage: w._s(1).parent()
Finite words over {1, 2, 3}
"""
unpaired_i = []  # positions of unpaired occurrences of i
unpaired_ip = [] # positions of unpaired occurrences of i+1
for p,x in enumerate(self):
if x == i:
if unpaired_ip:
unpaired_ip.pop()
else:
unpaired_i.append(p)
elif x == i+1:
unpaired_ip.append(p)
unpaired = unpaired_i + unpaired_ip
out = list(self)
# replace the unpaired subword i^a (i+1)^b
# with i^b (i+1)^a
for j,p in enumerate(unpaired):
if j < len(unpaired_ip):
out[p] = i
else:
out[p] = i+1
return self.parent()(out, check=False)
def _to_partition_content(self):
r"""
Return the conversion of ``self`` to a word with partition content using
the `s_i` operators of Lascoux and Schützenberger.
EXAMPLES::
sage: w = Word([1,3,2,1,2,3,4,6,4,2,3,2])
sage: w._to_partition_content()
word: 132112454132
sage: Word([])._to_partition_content()
word:
"""
if self.length() == 0:
return self
from sage.combinat.words.word import Word
n = max(self)
ev = Word( Words(n)(self).evaluation() )
sig = ev.reversal().standard_permutation().reduced_word()
# sig is now the reverse complement of a reduced word for a minimal
# length permutation which would sort the letters of ev into a
# partition
out = self
for i in reversed(sig):
out = out._s(n-i)
return out
def cocharge(self):
r"""
Return the cocharge of ``self``. For a word `w`, this can be defined as
`n_{ev} - ch(w)`, where `ch(w)` is the charge of `w` and `ev` is the
evaluation of `w`, and `n_{ev}` is `\sum_{i<j} min(ev_i, ev_j)`.
EXAMPLES::
sage: Word([1,2,3]).cocharge()
0
sage: Word([3,2,1]).cocharge()
3
sage: Word([1,1,2]).cocharge()
0
sage: Word([2,1,2]).cocharge()
1
TESTS::
sage: Word([]).cocharge()
0
"""
return self.evaluation_partition().weighted_size() - self.charge()
def charge(self, check=True):
r"""
Return the charge of ``self``. This is defined as follows.
If `w` is a permutation of length `n` (in other words, the evaluation
of `w` is `(1, 1, \dots, 1)`), the statistic charge(`w`) is given by
`\sum_{i=1}^n c_i(w)` where `c_1(w) = 0` and `c_i(w)` is defined
recursively by setting `p_i` equal to `1` if `i` appears to the right
of `i-1` in `w` and `0` otherwise. Then we set `c_i(w) = c_{i-1}(w) +
p_i`.
EXAMPLES::
sage: Word([1, 2, 3]).charge()
3
sage: Word([3, 5, 1, 4, 2]).charge() == 0 + 1 + 1 + 2 + 2
True
If `w` is not a permutation, but the evaluation of `w` is a partition,
the charge of `w` is defined to be the sum of its charge subwords
(each of which will be a permutation). The first charge subword is
found by starting at the end of `w` and moving left until the first
`1` is found. This is marked, and we continue to move to the left
until the first `2` is found, wrapping around from the beginning of
the word back to the end, if necessary. We mark this `2`, and
continue on until we have marked the largest letter in `w`. The
marked letters, with relative order preserved, form the first charge
subword of `w`. This subword is removed, and the next charge subword
is found in the same manner from the remaining letters. In the
following example, `w1, w2, w3` are the charge subwords of `w`.
EXAMPLES::
sage: w = Word([5,2,3,4,4,1,1,1,2,2,3])
sage: w1 = Word([5, 2, 4, 1, 3])
sage: w2 = Word([3, 4, 1, 2])
sage: w3 = Word([1, 2])
sage: w.charge() == w1.charge() + w2.charge() + w3.charge()
True
Finally, if `w` does not have partition content, we apply the
Lascoux-Schützenberger standardization operators `s_i` in such a
manner as to obtain a word with partition content. (The word we obtain
is independent of the choice of operators.) The charge is then
defined to be the charge of this word::
sage: Word([3,3,2,1,1]).charge()
0
sage: Word([1,2,3,1,2]).charge()
2
Note that this differs from the definition of charge given in
Macdonald's book. The difference amounts to a choice of
reading a word from left-to-right or right-to-left. The choice in
Sage was made to agree with the definition of a reading word of a
tableau in Sage, and seems to be the more common convention in the
literature.
REFERENCES:
[1] Ian Macdonald, *Symmetric Functions and Hall Polynomials* second
edition, 1995, Oxford University Press
[2] A. Lascoux, L. Lapointe, and J. Morse. *Tableau atoms and a new
Macdonald positivity conjecture.* Duke Math Journal, **116 (1)**,
2003. Available at: [http://arxiv.org/abs/math/0008073]
[3] A. Lascoux, B. Leclerc, and J.Y. Thibon. *The Plactic Monoid*.
Survey article available at
[http://www-igm.univ-mlv.fr/~jyt/ARTICLES/plactic.ps]
TESTS::
sage: Word([1,1,2,2,3]).charge()
4
sage: Word([3,1,1,2,2]).charge()
3
sage: Word([2,1,1,2,3]).charge()
2
sage: Word([2,1,1,3,2]).charge()
2
sage: Word([3,2,1,1,2]).charge()
1
sage: Word([2,2,1,1,3]).charge()
1
sage: Word([3,2,2,1,1]).charge()
0
sage: Word([]).charge()
0
"""
if check:
ev_dict = self.evaluation_dict()
ordered_alphabet = sorted(ev_dict,
key=self.parent().sortkey_letters)
evaluation = [ev_dict[a] for a in ordered_alphabet]
from sage.combinat.partition import Partitions
if evaluation not in Partitions():
return self._to_partition_content().charge()
res = 0
w = self.to_integer_list()
while len(w) != 0:
i = len(w) - 1
l = min(w)
index = 0
while len(w) != 0 and l <= max(w):
while w[i] != l:
i -= 1
if i < 0:
i = len(w) - 1
index += 1
res += index
l += 1
w.pop(i)
i -= 1
if i < 0:
i = len(w) - 1
index += 1
return res
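# --- Illustrative sketch (not part of the Sage source) ----------------------
# The basic case of the charge statistic, for a word that is a permutation of
# 1..n, following the recursive definition given in the docstring above.
# For illustration only.
def _charge_of_permutation_sketch(w):
    pos = {letter: k for k, letter in enumerate(w)}
    total, c = 0, 0
    for i in range(2, len(w) + 1):
        if pos[i] > pos[i - 1]:
            # i appears to the right of i-1, so the partial index increases
            c += 1
        total += c
    return total
# Example: _charge_of_permutation_sketch([3, 5, 1, 4, 2]) == 6  (= 0+1+1+2+2)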
def BWT(self):
r"""
Return the Burrows-Wheeler Transform (BWT) of ``self``.
The *Burrows-Wheeler transform* of a finite word `w` is obtained
from `w` by first listing the conjugates of `w` in lexicographic order
and then concatenating the final letters of the conjugates in this
order. See [1].
EXAMPLES::
sage: Word('abaccaaba').BWT()
word: cbaabaaca
sage: Word('abaab').BWT()
word: bbaaa
sage: Word('bbabbaca').BWT()
word: cbbbbaaa
sage: Word('aabaab').BWT()
word: bbaaaa
sage: Word().BWT()
word:
sage: Word('a').BWT()
word: a
REFERENCES:
- [1] M. Burrows, D.J. Wheeler, "A block-sorting lossless data
compression algorithm", HP Lab Technical Report, 1994, available
at http://www.hpl.hp.com/techreports/Compaq-DEC/SRC-RR-124.html
"""
if self.is_empty():
return self
conjugates = sorted(self._conjugates_list())
return self.parent()([x[x.length()-1] for x in conjugates], check=False)
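# --- Illustrative sketch (not part of the Sage source) ----------------------
# The Burrows-Wheeler transform on a plain string: sort all rotations
# lexicographically and read off the last column.  For illustration only.
def _bwt_sketch(w):
    if not w:
        return w
    rotations = sorted(w[i:] + w[:i] for i in range(len(w)))
    return ''.join(r[-1] for r in rotations)
# Example: _bwt_sketch('abaccaaba') == 'cbaabaaca'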
def iterated_left_palindromic_closure(self, f=None):
r"""
Return the iterated left (``f``-)palindromic closure of ``self``.
INPUT:
- ``f`` -- involution (default: ``None``) on the alphabet of ``self``.
It must be callable on letters as well as words (e.g. ``WordMorphism``).
OUTPUT:
word -- the left iterated ``f``-palindromic closure of ``self``.
EXAMPLES::
sage: Word('123').iterated_left_palindromic_closure()
word: 3231323
sage: f = WordMorphism('a->b,b->a')
sage: Word('ab').iterated_left_palindromic_closure(f=f)
word: abbaab
sage: Word('aab').iterated_left_palindromic_closure(f=f)
word: abbaabbaab
TESTS:
If ``f`` is not an involution::
sage: f = WordMorphism('a->b,b->b')
sage: Word('aab').iterated_left_palindromic_closure(f=f)
Traceback (most recent call last):
...
TypeError: self (=a->b, b->b) is not an endomorphism
REFERENCES:
- A. de Luca, A. De Luca, Pseudopalindrome closure operators
in free monoids, Theoret. Comput. Sci. 362 (2006) 282--300.
"""
if f is None:
return self.reversal().iterated_right_palindromic_closure(f=f)
else:
from sage.combinat.words.morphism import WordMorphism
f = WordMorphism(f)
return f(self).reversal().iterated_right_palindromic_closure(f=f)
def count(self, letter):
r"""
Count the number of occurrences of ``letter`` in ``self``.
EXAMPLES::
sage: Word('abbabaab').count('a')
4
"""
return Integer(sum(1 for a in self if a == letter))
def balance(self):
r"""
Return the balance of ``self``.
The balance of a word is the smallest number `q` such that ``self`` is
`q`-balanced [1].
A finite or infinite word `w` is said to be `q`-*balanced* if for
any two factors `u`, `v` of `w` of the same length, the difference
between the number of `x`'s in each of `u` and `v` is at most `q`
for all letters `x` in the alphabet of `w`. A `1`-balanced word is
simply said to be balanced. See Chapter 2 of [2].
OUTPUT:
integer
EXAMPLES::
sage: Word('1111111').balance()
0
sage: Word('001010101011').balance()
2
sage: Word('0101010101').balance()
1
::
sage: w = Word('11112222')
sage: w.is_balanced(2)
False
sage: w.is_balanced(3)
False
sage: w.is_balanced(4)
True
sage: w.is_balanced(5)
True
sage: w.balance()
4
TESTS::
sage: Word('1111122222').balance()
5
sage: Word('').balance()
0
sage: Word('1').balance()
0
sage: Word('12').balance()
1
sage: Word('1112').balance()
1
REFERENCES:
- [1] I. Fagnot, L. Vuillon, Generalized balances in Sturmian words,
Discrete Applied Mathematics 121 (2002), 83--101.
- [2] M. Lothaire, Algebraic Combinatorics On Words, vol. 90 of
Encyclopedia of Mathematics and its Applications, Cambridge
University Press, U.K., 2002.
"""
alphabet = self.letters()
best = 0
for i in range(1, self.length()):
start = iter(self)
end = iter(self)
abelian = dict(zip(alphabet, [0]*len(alphabet)))
for _ in range(i):
abelian[next(end)] += 1
abel_max = abelian.copy()
abel_min = abelian.copy()
for _ in range(self.length() - i):
lost = next(start)
gain = next(end)
abelian[gain] += 1
abelian[lost] -= 1
abel_max[gain] = max(abel_max[gain], abelian[gain])
abel_min[lost] = min(abel_min[lost], abelian[lost])
best = max(best, max(abel_max[a] - abel_min[a] for a in alphabet))
return best
def is_balanced(self, q=1):
r"""
Return ``True`` if ``self`` is ``q``-balanced, and ``False`` otherwise.
A finite or infinite word `w` is said to be `q`-*balanced* if for
any two factors `u`, `v` of `w` of the same length, the difference
between the number of `x`'s in each of `u` and `v` is at most `q`
for all letters `x` in the alphabet of `w`. A `1`-balanced word is
simply said to be balanced. See for instance [1] and Chapter 2 of
[2].
INPUT:
- ``q`` -- integer (default: ``1``), the balance level
OUTPUT:
boolean -- the result
EXAMPLES::
sage: Word('1213121').is_balanced()
True
sage: Word('1122').is_balanced()
False
sage: Word('121333121').is_balanced()
False
sage: Word('121333121').is_balanced(2)
False
sage: Word('121333121').is_balanced(3)
True
sage: Word('121122121').is_balanced()
False
sage: Word('121122121').is_balanced(2)
True
TESTS::
sage: Word('121122121').is_balanced(-1)
Traceback (most recent call last):
...
TypeError: the balance level must be a positive integer
sage: Word('121122121').is_balanced(0)
Traceback (most recent call last):
...
TypeError: the balance level must be a positive integer
sage: Word('121122121').is_balanced('a')
Traceback (most recent call last):
...
TypeError: the balance level must be a positive integer
REFERENCES:
- [1] J. Cassaigne, S. Ferenczi, L.Q. Zamboni, Imbalances in
Arnoux-Rauzy sequences, Ann. Inst. Fourier (Grenoble) 50 (2000)
1265--1276.
- [2] M. Lothaire, Algebraic Combinatorics On Words, vol. 90 of
Encyclopedia of Mathematics and its Applications, Cambridge
University Press, U.K., 2002.
"""
if not isinstance(q, (int, Integer)) or q <= 0:
raise TypeError("the balance level must be a positive integer")
alphabet = self.letters()
for i in range(2, self.length()):
empty_sets = [set() for _ in range(len(alphabet))]
tab = dict(zip(alphabet, empty_sets))
for fact in self.factor_iterator(i):
evaluation_dict = fact.evaluation_dict()
for a in alphabet:
tab[a].add(evaluation_dict.get(a, 0))
for t in tab.values():
if len(t) > q+1:
return False
return True
def abelian_vectors(self, n):
r"""
Return the abelian vectors of factors of length ``n`` of ``self``.
The vectors are defined w.r.t. the order of the alphabet of the
parent.
OUTPUT:
a set of tuples
EXAMPLES::
sage: W = Words([0,1,2])
sage: w = W([0,1,1,0,1,2,0,2,0,2])
sage: w.abelian_vectors(3)
{(1, 0, 2), (1, 1, 1), (1, 2, 0), (2, 0, 1)}
sage: w[:5].abelian_vectors(3)
{(1, 2, 0)}
sage: w[5:].abelian_vectors(3)
{(1, 0, 2), (2, 0, 1)}
::
sage: w = words.FibonacciWord()[:100]
sage: sorted(w.abelian_vectors(0))
[(0, 0)]
sage: sorted(w.abelian_vectors(1))
[(0, 1), (1, 0)]
sage: sorted(w.abelian_vectors(7))
[(4, 3), (5, 2)]
The word must be defined with a parent on a finite alphabet::
sage: from itertools import count
sage: w = Word(count(), alphabet=NN)
sage: w[:2].abelian_vectors(2)
Traceback (most recent call last):
...
TypeError: The alphabet of the parent is infinite; define the
word with a parent on a finite alphabet
TESTS::
sage: W = Words([0, 1])
sage: w = W([0,0,0])
sage: sorted(w.abelian_vectors(3))
[(3, 0)]
sage: w = W([0,0,0,1])
sage: sorted(w.abelian_vectors(3))
[(2, 1), (3, 0)]
sage: w = W([0,0,0,1,1])
sage: sorted(w.abelian_vectors(3))
[(1, 2), (2, 1), (3, 0)]
sage: w = W([0,0,0,1,1,1])
sage: sorted(w.abelian_vectors(3))
[(0, 3), (1, 2), (2, 1), (3, 0)]
::
sage: w = Word([0,1,0], alphabet=[0,1])
sage: w.abelian_complexity(3)
1
sage: w.abelian_complexity(4)
0
"""
alphabet = self.parent().alphabet()
size = alphabet.cardinality()
if size == float('inf'):
raise TypeError("The alphabet of the parent is infinite; define"
" the word with a parent on a finite alphabet")
S = set()
if n > self.length():
return S
rank = dict((letter,i) for i,letter in enumerate(alphabet))
start = iter(self)
end = iter(self)
abelian = [0] * size
for _ in range(n):
abelian[rank[next(end)]] += 1
S.add(tuple(abelian))
for letter in end:
abelian[rank[letter]] += 1
abelian[rank[next(start)]] -= 1
S.add(tuple(abelian))
return S
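# --- Illustrative sketch (not part of the Sage source) ----------------------
# Sliding-window computation of the abelian (Parikh) vectors of the length-n
# factors, on a plain list or string with an explicit alphabet.  For
# illustration only.
def _abelian_vectors_sketch(w, n, alphabet):
    rank = {a: i for i, a in enumerate(alphabet)}
    if n > len(w):
        return set()
    vec = [0] * len(alphabet)
    for a in w[:n]:
        vec[rank[a]] += 1
    seen = {tuple(vec)}
    # slide the window one position at a time
    for old, new in zip(w, w[n:]):
        vec[rank[old]] -= 1
        vec[rank[new]] += 1
        seen.add(tuple(vec))
    return seen
# Example: _abelian_vectors_sketch([0,1,1,0,1,2,0,2,0,2], 3, [0,1,2])
#          == {(1, 0, 2), (1, 1, 1), (1, 2, 0), (2, 0, 1)}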
def abelian_complexity(self, n):
r"""
Return the number of abelian vectors of factors of length ``n`` of ``self``.
EXAMPLES::
sage: w = words.FibonacciWord()[:100]
sage: [w.abelian_complexity(i) for i in range(20)]
[1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
::
sage: w = words.ThueMorseWord()[:100]
sage: [w.abelian_complexity(i) for i in range(20)]
[1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2]
"""
return len(self.abelian_vectors(n))
def sturmian_desubstitute_as_possible(self):
r"""
Sturmian-desubstitute the word ``self`` as much as possible.
The finite word ``self`` must be defined on a two-letter
alphabet or use at most two letters.
It can be Sturmian desubstituted if one letter appears
isolated: the Sturmian desubstitution consists in removing one
letter per run of the non-isolated letter. The accelerated
Sturmian desubstitution consists in removing a run equal to
the length of the shortest inner run from any run of the
non-isolated letter (including possible leading and trailing
runs even if they have shorter length). The (accelerated)
Sturmian desubstitution is done as much as possible. A word is
a factor of a Sturmian word if, and only if, the result is the
empty word.
OUTPUT:
a finite word defined on a two-letter alphabet
EXAMPLES::
sage: u = Word('10111101101110111',alphabet='01') ; u
word: 10111101101110111
sage: v = u.sturmian_desubstitute_as_possible() ; v
word: 01100101
sage: v == v.sturmian_desubstitute_as_possible()
True
sage: Word('azaazaaazaaazaazaaaz', alphabet='az').sturmian_desubstitute_as_possible()
word:
TESTS::
sage: w = Word('azazaza', alphabet='aze')
sage: w.sturmian_desubstitute_as_possible()
word:
sage: Word('aze').sturmian_desubstitute_as_possible()
Traceback (most recent call last):
...
TypeError: your word must be defined on a binary alphabet or use at most two different letters
sage: Word('azaaazaazaazaaazaaza', alphabet='az').sturmian_desubstitute_as_possible()
word:
sage: Word('azaaazaazaazaaazaaaza', alphabet='az').sturmian_desubstitute_as_possible()
word: azzaa
Boundary effects::
sage: Word('', alphabet='az').sturmian_desubstitute_as_possible()
word:
sage: Word('azzzzz', alphabet='az').sturmian_desubstitute_as_possible()
word:
sage: Word('zzzzza', alphabet='az').sturmian_desubstitute_as_possible()
word:
sage: Word('aaaaazaaaaaaaaa', alphabet='az').sturmian_desubstitute_as_possible()
word:
sage: Word('aaaaaaaaaaaaaa', alphabet='az').sturmian_desubstitute_as_possible()
word:
Boundary effects without alphabet::
sage: Word('').sturmian_desubstitute_as_possible()
word:
sage: Word('azzzzz').sturmian_desubstitute_as_possible()
word:
sage: Word('zzzzza').sturmian_desubstitute_as_possible()
word:
sage: Word('aaaaazaaaaaaaaa').sturmian_desubstitute_as_possible()
word:
sage: Word('aaaaaaaaaaaaaa').sturmian_desubstitute_as_possible()
word:
Idempotence::
sage: r = words.RandomWord(randint(1,15)).sturmian_desubstitute_as_possible() ; r == r.sturmian_desubstitute_as_possible()
True
AUTHOR:
- Thierry Monteil
"""
if self.is_empty():
return self
W = self.parent()
if W.alphabet().cardinality() == 2:
alphabet = W.alphabet()
else:
alphabet = self.letters()
if len(alphabet) > 2:
raise TypeError('your word must be defined on a binary alphabet or use at most two different letters')
elif len(alphabet) < 2:
return W()
word_from_letter = {l:W([l],datatype="list",check=False) for l in alphabet}
is_prefix = True
current_run_length = 0
prefix_length = 0
prefix_letter = self[0]
is_isolated = {alphabet[0]:True,alphabet[1]:True}
minimal_run = {alphabet[0]:Infinity,alphabet[1]:Infinity}
maximal_run = {alphabet[0]:0,alphabet[1]:0}
runs = {alphabet[0]:[],alphabet[1]:[]}
for i in self:
if is_prefix:
if i == prefix_letter:
prefix_length += 1
if prefix_length > 1:
is_isolated[i] = False
else:
is_prefix = False
current_run_length = 1
previous_letter = i
else:
if i == previous_letter:
current_run_length += 1
if current_run_length > 1:
is_isolated[i] = False
else:
runs[previous_letter].append(current_run_length)
minimal_run[previous_letter] = min(minimal_run[previous_letter],current_run_length)
maximal_run[previous_letter] = max(maximal_run[previous_letter],current_run_length)
current_run_length = 1
previous_letter = i
# at this point, previous_letter is the suffix letter and current_run_length is the suffix length
if not (is_isolated[alphabet[0]] or is_isolated[alphabet[1]]):
return self
elif is_isolated[alphabet[0]] and is_isolated[alphabet[1]]:
return W()
else:
if is_isolated[alphabet[0]]:
l_isolated = alphabet[0] #the isolated letter
l_running = alphabet[1] #the running letter (non-isolated)
else:
l_isolated = alphabet[1]
l_running = alphabet[0]
w_isolated = word_from_letter[l_isolated] #the word associated to the isolated letter
w_running = word_from_letter[l_running] #the word associated to the running letter
min_run = minimal_run[l_running]
if (prefix_letter == l_isolated) or (prefix_length <= min_run):
desubstitued_word = W()
else:
desubstitued_word = w_running ** (prefix_length - min_run)
for i in runs[l_running]:
desubstitued_word = desubstitued_word + w_isolated + w_running ** (i - min_run)
if (current_run_length > 0):
desubstitued_word = desubstitued_word + w_isolated
if (previous_letter == l_running) and (current_run_length > min_run):
desubstitued_word = desubstitued_word + w_running ** (current_run_length - min_run)
return desubstitued_word.sturmian_desubstitute_as_possible()
def is_sturmian_factor(self):
r"""
Tell whether ``self`` is a factor of a Sturmian word.
The finite word ``self`` must be defined on a two-letter alphabet.
Equivalently, tells whether ``self`` is balanced. The
advantage over the ``is_balanced`` method is that this one runs in
linear time whereas ``is_balanced`` runs in quadratic time.
OUTPUT:
boolean -- the result
EXAMPLES::
sage: w = Word('0111011011011101101',alphabet='01')
sage: w.is_sturmian_factor()
True
::
sage: words.LowerMechanicalWord(random(),alphabet='01')[:100].is_sturmian_factor()
True
sage: words.CharacteristicSturmianWord(random())[:100].is_sturmian_factor()
True
::
sage: w = Word('aabb',alphabet='ab')
sage: w.is_sturmian_factor()
False
sage: s1 = WordMorphism('a->ab,b->b')
sage: s2 = WordMorphism('a->ba,b->b')
sage: s3 = WordMorphism('a->a,b->ba')
sage: s4 = WordMorphism('a->a,b->ab')
sage: W = Words('ab')
sage: w = W('ab')
sage: for i in range(8): w = choice([s1,s2,s3,s4])(w)
sage: w
word: abaaabaaabaabaaabaaabaabaaabaabaaabaaaba...
sage: w.is_sturmian_factor()
True
Famous words::
sage: words.FibonacciWord()[:100].is_sturmian_factor()
True
sage: words.ThueMorseWord()[:1000].is_sturmian_factor()
False
sage: words.KolakoskiWord()[:1000].is_sturmian_factor()
False
REFERENCES:
.. [Arn2002] \P. Arnoux, Sturmian sequences, in Substitutions in Dynamics,
N. Pytheas Fogg (Ed.), Arithmetics, and Combinatorics (Lecture
Notes in Mathematics, Vol. 1794), 2002.
.. [Ser1985] \C. Series. The geometry of Markoff numbers. The Mathematical
Intelligencer, 7(3):20--29, 1985.
.. [SU2009] \J. Smillie and C. Ulcigrai. Symbolic coding for linear
trajectories in the regular octagon, :arxiv:`0905.0871`, 2009.
AUTHOR:
- Thierry Monteil
"""
return self.sturmian_desubstitute_as_possible().is_empty()
def is_tangent(self):
r"""
Tell whether ``self`` is a tangent word.
The finite word ``self`` must be defined on a two-letter alphabet.
A binary word is said to be *tangent* if it can appear in
infinitely many cutting sequences of a smooth curve, where each
cutting sequence is observed on a progressively smaller grid.
This class of words strictly contains the class of `1`-balanced
words, and is strictly contained in the class of `2`-balanced words.
This method runs in linear time.
OUTPUT:
boolean -- the result
EXAMPLES::
sage: w = Word('01110110110111011101',alphabet='01')
sage: w.is_tangent()
True
Some tangent words may not be balanced::
sage: Word('aabb',alphabet='ab').is_balanced()
False
sage: Word('aabb',alphabet='ab').is_tangent()
True
Some `2`-balanced words may not be tangent::
sage: Word('aaabb',alphabet='ab').is_tangent()
False
sage: Word('aaabb',alphabet='ab').is_balanced(2)
True
Famous words::
sage: words.FibonacciWord()[:100].is_tangent()
True
sage: words.ThueMorseWord()[:1000].is_tangent()
True
sage: words.KolakoskiWord()[:1000].is_tangent()
False
REFERENCES:
.. [Mon2010] \T. Monteil, The asymptotic language of smooth curves, talk
at LaCIM2010.
AUTHOR:
- Thierry Monteil
"""
if (self.parent().alphabet().cardinality() != 2):
raise TypeError('your word must be defined on a binary alphabet')
[a,b] = self.parent().alphabet()
mini = 0
maxi = 0
height = 0
for i in self.sturmian_desubstitute_as_possible():
if i == a:
height = height + 1
maxi = max(maxi , height)
if i == b:
height = height - 1
mini = min(mini , height)
return (maxi - mini <= 2)
# TODO.
# 1. Those three swap functions should use the cmp of python.
# 2. The actual code should then be copied as is in the Word_over_Alphabet
# and continue to use the parent cmp
# 3. Once Word can define Words over alphabet, the examples
# should be updated appropriately.
def swap(self, i, j=None):
r"""
Return the word `w` with entries at positions ``i`` and
``j`` swapped. By default, ``j = i+1``.
EXAMPLES::
sage: Word([1,2,3]).swap(0,2)
word: 321
sage: Word([1,2,3]).swap(1)
word: 132
sage: Word("abba").swap(1,-1)
word: aabb
"""
if j is None:
j = i+1
new = list(self)
(new[i], new[j]) = (new[j], new[i])
from sage.combinat.words.word import Word
return Word(new)
def swap_increase(self, i):
r"""
Return the word with positions ``i`` and ``i+1`` exchanged
if ``self[i] > self[i+1]``. Otherwise, it returns ``self``.
EXAMPLES::
sage: w = Word([1,3,2])
sage: w.swap_increase(1)
word: 123
sage: w.swap_increase(0)
word: 132
sage: w.swap_increase(0) is w
True
sage: Words("ab")("abba").swap_increase(0)
word: abba
sage: Words("ba")("abba").swap_increase(0)
word: baba
"""
key = self._parent.sortkey_letters
if key(self[i]) > key(self[i + 1]):
return self.swap(i)
else:
return self
def swap_decrease(self, i):
r"""
Return the word with positions ``i`` and ``i+1`` exchanged
if ``self[i] < self[i+1]``. Otherwise, it returns ``self``.
EXAMPLES::
sage: w = Word([1,3,2])
sage: w.swap_decrease(0)
word: 312
sage: w.swap_decrease(1)
word: 132
sage: w.swap_decrease(1) is w
True
sage: Words("ab")("abba").swap_decrease(0)
word: baba
sage: Words("ba")("abba").swap_decrease(0)
word: abba
"""
key = self._parent.sortkey_letters
if key(self[i]) < key(self[i + 1]):
return self.swap(i)
else:
return self
def abelian_vector(self, alphabet=None):
r"""
Return the abelian vector of ``self`` counting the occurrences of each letter.
The vector is defined w.r.t. the order of the alphabet of the
parent. See also :meth:`evaluation_dict`.
INPUT:
- ``self`` -- word having a parent on a finite alphabet
- ``alphabet`` -- *DEPRECATED*
OUTPUT:
a list
EXAMPLES::
sage: W = Words('ab')
sage: W('aaabbbbb').abelian_vector()
[3, 5]
sage: W('a').abelian_vector()
[1, 0]
sage: W().abelian_vector()
[0, 0]
The argument ``alphabet`` is deprecated::
sage: Word('aabaa').abelian_vector('abc')
doctest:...: DeprecationWarning: The argument alphabet of
methods abelian_vector and parikh_vector is deprecated and will
be removed in a future version of Sage. In order to fix this,
you must define your word on a parent with a finite alphabet.
See http://trac.sagemath.org/17058 for details.
[4, 1, 0]
You may fix the above deprecated use of the ``alphabet`` argument this way::
sage: W = Words('abc')
sage: W('aabaa').abelian_vector()
[4, 1, 0]
TESTS::
sage: W = Words()
sage: W('aabaa').abelian_vector()
Traceback (most recent call last):
...
TypeError: The alphabet of the parent is infinite; define the
word with a parent on a finite alphabet or use
evaluation_dict() instead
"""
if alphabet is None:
if self.parent().alphabet().cardinality() is Infinity:
raise TypeError("The alphabet of the parent is infinite; define "
"the word with a parent on a finite alphabet or use "
"evaluation_dict() instead")
alphabet = self.parent().alphabet()
else:
from sage.misc.superseded import deprecation
deprecation(17058, "The argument alphabet of methods abelian_vector "
"and parikh_vector is deprecated and will be "
"removed in a future version of Sage. In order to "
"fix this, you must define your word on a parent "
"with a finite alphabet.")
ev_dict = self.evaluation_dict()
return [ev_dict.get(a,0) for a in alphabet]
parikh_vector = deprecated_function_alias(17058, abelian_vector)
evaluation = abelian_vector
def robinson_schensted(self):
"""
Return the semistandard tableau and standard tableau pair
obtained by running the Robinson-Schensted algorithm on ``self``.
This can also be done by running
:func:`~sage.combinat.rsk.RSK` on ``self``.
EXAMPLES::
sage: Word([1,1,3,1,2,3,1]).robinson_schensted()
[[[1, 1, 1, 1, 3], [2], [3]], [[1, 2, 3, 5, 6], [4], [7]]]
"""
from sage.combinat.rsk import RSK
return RSK(self)
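# --- Illustrative sketch (not part of the Sage source) ----------------------
# Row insertion (bumping) version of Robinson-Schensted on a plain list of
# integers, returning the pair (P, Q) as lists of rows.  For illustration
# only; Sage's RSK supports more general inputs and insertion rules.
def _rsk_sketch(word):
    from bisect import bisect_right
    P, Q = [], []
    for k, x in enumerate(word, start=1):
        row = 0
        while True:
            if row == len(P):
                # x settles at the start of a new row
                P.append([x])
                Q.append([k])
                break
            r = P[row]
            j = bisect_right(r, x)       # first entry strictly larger than x
            if j == len(r):
                r.append(x)
                Q[row].append(k)
                break
            x, r[j] = r[j], x            # bump and insert into the next row
            row += 1
    return P, Q
# Example: _rsk_sketch([1,1,3,1,2,3,1])
#          == ([[1, 1, 1, 1, 3], [2], [3]], [[1, 2, 3, 5, 6], [4], [7]])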
def _rsk_iter(self):
r"""
Return an iterator for :func:`~sage.combinat.rsk.RSK`.
Yields pairs `(i, w_i)` for a word `w = w_1 w_2 \cdots w_k`.
EXAMPLES::
sage: for x in Word([1,1,3,1,2,3,1])._rsk_iter(): x
...
(1, 1)
(2, 1)
(3, 3)
(4, 1)
(5, 2)
(6, 3)
(7, 1)
"""
return zip(range(1, len(self) + 1), self)
def shuffle(self, other, overlap=0):
r"""
Return the combinatorial class representing the shuffle product
between words ``self`` and ``other``. This consists of all words of length
``self.length()+other.length()`` that have both ``self`` and ``other`` as
subwords.
If ``overlap`` is non-zero, then the combinatorial class representing
the shuffle product with overlaps is returned. The calculation of
the shift in each overlap is done relative to the order of the
alphabet. For example, `a` shifted by `a` is `b` in the alphabet
`[a, b, c]` and `0` shifted by `1` in `[0, 1, 2, 3]` is `2`.
INPUT:
- ``other`` -- finite word
- ``overlap`` -- (default: ``0``) integer or ``True``
OUTPUT:
combinatorial class of shuffle product of ``self`` and ``other``
EXAMPLES::
sage: ab = Word("ab")
sage: cd = Word("cd")
sage: sp = ab.shuffle(cd); sp
Shuffle product of word: ab and word: cd
sage: sp.cardinality()
6
sage: sp.list()
[word: abcd, word: acbd, word: acdb, word: cabd, word: cadb, word: cdab]
sage: w = Word([0,1])
sage: u = Word([2,3])
sage: w.shuffle(w)
Shuffle product of word: 01 and word: 01
sage: u.shuffle(u)
Shuffle product of word: 23 and word: 23
sage: w.shuffle(u)
Shuffle product of word: 01 and word: 23
sage: w.shuffle(u,2)
Overlapping shuffle product of word: 01 and word: 23 with 2 overlaps
"""
if overlap == 0:
from sage.combinat.words.shuffle_product import ShuffleProduct_w1w2
return ShuffleProduct_w1w2(self, other)
else:
if any(a not in ZZ for a in self) or any(a not in ZZ for a in other):
raise ValueError("for a nonzero overlap, words must contain integers as letters")
if overlap is True:
from sage.combinat.words.shuffle_product import ShuffleProduct_overlapping
return ShuffleProduct_overlapping(self, other)
elif isinstance(overlap, (int,Integer)):
from sage.combinat.words.shuffle_product import ShuffleProduct_overlapping_r
return ShuffleProduct_overlapping_r(self, other, overlap)
raise ValueError('overlapping must be True or an integer')
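# --- Illustrative sketch (not part of the Sage source) ----------------------
# The plain (non-overlapping) shuffle product of two strings as a recursive
# generator: every word of length len(u)+len(v) having both u and v as
# subwords.  For illustration only.
def _shuffle_sketch(u, v):
    if not u:
        yield v
        return
    if not v:
        yield u
        return
    for w in _shuffle_sketch(u[1:], v):
        yield u[0] + w
    for w in _shuffle_sketch(u, v[1:]):
        yield v[0] + w
# Example: sorted(set(_shuffle_sketch('ab', 'cd')))
#          == ['abcd', 'acbd', 'acdb', 'cabd', 'cadb', 'cdab']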
def shifted_shuffle(self, other, shift=None):
r"""
Return the combinatorial class representing the shifted shuffle
product between words ``self`` and ``other``. This is the same as the
shuffle product of ``self`` with the word obtained from ``other`` by
incrementing its values (i.e. its letters) by the given ``shift``.
INPUT:
- ``other`` -- finite word over the integers
- ``shift`` -- integer or ``None`` (default: ``None``) added to each letter of
``other``. When ``shift`` is ``None``, it is replaced by ``self.length()``
OUTPUT:
combinatorial class of shifted shuffle products of ``self`` and ``other``
EXAMPLES::
sage: w = Word([0,1,1])
sage: sp = w.shifted_shuffle(w); sp
Shuffle product of word: 011 and word: 344
sage: sp = w.shifted_shuffle(w, 2); sp
Shuffle product of word: 011 and word: 233
sage: sp.cardinality()
20
sage: WordOptions(identifier='')
sage: sp.list()
[011233, 012133, 012313, 012331, 021133, 021313, 021331, 023113, 023131, 023311, 201133, 201313, 201331, 203113, 203131, 203311, 230113, 230131, 230311, 233011]
sage: WordOptions(identifier='word: ')
sage: y = Word('aba')
sage: y.shifted_shuffle(w,2)
Traceback (most recent call last):
...
ValueError: for shifted shuffle, words must only contain integers as letters
"""
if any(a not in ZZ for a in self) or any(a not in ZZ for a in other):
raise ValueError("for shifted shuffle, words must only contain integers as letters")
if shift is None:
from sage.combinat.words.shuffle_product import ShuffleProduct_shifted
return ShuffleProduct_shifted(self, other)
else:
return self.shuffle(self._parent([x + shift for x in other], check=False))
######################################################################
# XXX TODO: The description is inconsistent w.r.t. "W" and "alphabet".
######################################################################
def delta_inv(self, W=None, s=None):
r"""
Lift ``self`` via the delta operator to obtain a word containing the
letters in alphabet (default is ``[1, 2]``). The letters used in the
construction start with ``s`` (default is ``alphabet[0]``) and cycle
through alphabet.
INPUT:
- ``alphabet`` -- an iterable
- ``s`` -- an object in the iterable
EXAMPLES::
sage: W = Words([1, 2])
sage: W([2, 2, 1, 1]).delta_inv()
word: 112212
sage: W([1, 1, 1, 1]).delta_inv(Words('123'))
word: 1231
sage: W([2, 2, 1, 1, 2]).delta_inv(s=2)
word: 22112122
"""
alphabet = [1, 2] if W is None else W.alphabet()
cycle_alphabet = cycle(alphabet)
if self.is_empty():
return Words(alphabet)()
if s is None:
s = next(cycle_alphabet)
else:
if s not in alphabet:
raise ValueError("starting letter not in alphabet")
t = next(cycle_alphabet)
while t != s:
t = next(cycle_alphabet)
w = []
for i in self:
w.extend([s] * i)
s = next(cycle_alphabet)
return Words(alphabet)(w)
def delta(self):
r"""
Return the image of ``self`` under the delta morphism. This is the
word composed of the lengths of the consecutive runs of the same
letter in ``self``.
EXAMPLES::
sage: W = Words('0123456789')
sage: W('22112122').delta()
word: 22112
sage: W('555008').delta()
word: 321
sage: W().delta()
word:
sage: Word('aabbabaa').delta()
word: 22112
"""
if self.is_empty():
return Words()([])
ss = self[0]
c = 0
v = list()
max_c = 0
for s in self:
if s == ss:
c += 1
if c > max_c:
max_c = c
else:
v.append(c)
ss = s
c = 1
v.append(c)
return Words(list(range(1, 1 + max_c)))(v)
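# --- Illustrative sketch (not part of the Sage source) ----------------------
# The delta morphism on a plain string or list: the lengths of the maximal
# runs of equal letters, read from left to right.  For illustration only.
def _delta_sketch(w):
    from itertools import groupby
    return [len(list(g)) for _, g in groupby(w)]
# Example: _delta_sketch('22112122') == [2, 2, 1, 1, 2]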
# TODO. Decide whether delta_derivate* really need W.alphabet().last()....
# RENAME: Should "derivate" be derivative?!
def delta_derivate(self, W=None):
r"""
Return the derivative under delta for ``self``.
EXAMPLES::
sage: W = Words('12')
sage: W('12211').delta_derivate()
word: 22
sage: W('1').delta_derivate(Words([1]))
word: 1
sage: W('2112').delta_derivate()
word: 2
sage: W('2211').delta_derivate()
word: 22
sage: W('112').delta_derivate()
word: 2
sage: W('11222').delta_derivate(Words([1, 2, 3]))
word: 3
"""
d = self.delta()
if len(d) == 0:
return d
if W is None:
W = d.parent()
if d[0] != W.alphabet().last():
d = d[1:]
if d[-1] != W.alphabet().last():
d = d[:-1]
return d
def delta_derivate_left(self, W=None):
r"""
Return the left derivative under delta for ``self``.
EXAMPLES::
sage: W = Words('12')
sage: W('12211').delta_derivate_left()
word: 22
sage: W('1').delta_derivate_left(Words([1]))
word: 1
sage: W('2112').delta_derivate_left()
word: 21
sage: W('2211').delta_derivate_left()
word: 22
sage: W('112').delta_derivate_left()
word: 21
sage: W('11222').delta_derivate_left(Words([1, 2, 3]))
word: 3
"""
d = self.delta()
if len(d) == 0:
return d
if W is None:
W = d.parent()
if d[0] != W.alphabet().last():
d = d[1:]
return d
def delta_derivate_right(self, W=None):
r"""
Return the right derivative under delta for ``self``.
EXAMPLES::
sage: W = Words('12')
sage: W('12211').delta_derivate_right()
word: 122
sage: W('1').delta_derivate_right(Words([1]))
word: 1
sage: W('2112').delta_derivate_right()
word: 12
sage: W('2211').delta_derivate_right()
word: 22
sage: W('112').delta_derivate_right()
word: 2
sage: W('11222').delta_derivate_right(Words([1, 2, 3]))
word: 23
"""
d = self.delta()
if len(d) == 0:
return d
if W is None:
W = d.parent()
if d[-1] != W.alphabet().last():
d = d[:-1]
return d
def phi(self):
r"""
Apply the phi function to ``self`` and return the result. This is
the word obtained by taking the first letter of the words obtained
by iterating delta on ``self``.
OUTPUT:
a word -- the result of the phi function
EXAMPLES::
sage: W = Words([1, 2])
sage: W([2,2,1,1,2,1,2,2,1,2,2,1,1,2]).phi()
word: 222222
sage: W([2,1,2,2,1,2,2,1,2,1]).phi()
word: 212113
sage: W().phi()
word:
sage: Word([2,1,2,2,1,2,2,1,2,1]).phi()
word: 212113
sage: Word([2,3,1,1,2,1,2,3,1,2,2,3,1,2]).phi()
word: 21215
sage: Word("aabbabaabaabba").phi()
word: a22222
sage: w = Word([2,3,1,1,2,1,2,3,1,2,2,3,1,2])
REFERENCES:
- S. Brlek, A. Ladouceur, A note on differentiable palindromes,
Theoret. Comput. Sci. 302 (2003) 167--178.
- S. Brlek, S. Dulucq, A. Ladouceur, L. Vuillon, Combinatorial
properties of smooth infinite words, Theoret. Comput. Sci. 352
(2006) 306--317.
"""
if self.is_empty():
return self
v = [self[0]]
m = self.delta()
while m.length() > 1:
v.append(m[0])
m = m.delta()
v.append(m[0])
return Words()(v)
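# --- Illustrative sketch (not part of the Sage source) ----------------------
# The phi map on a plain list or string: collect the first letter of each
# successive delta (run-length) image, mirroring the method above.  For
# illustration only.
def _phi_sketch(w):
    from itertools import groupby
    delta = lambda u: [len(list(g)) for _, g in groupby(u)]
    if not w:
        return []
    out = [w[0]]
    m = delta(w)
    while len(m) > 1:
        out.append(m[0])
        m = delta(m)
    out.append(m[0])
    return out
# Example: _phi_sketch([2,2,1,1,2,1,2,2,1,2,2,1,1,2]) == [2, 2, 2, 2, 2, 2]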
def phi_inv(self, W=None):
r"""
Apply the inverse of the phi function to ``self``.
INPUT:
- ``self`` -- a word over the integers
- ``W`` -- a parent object of words defined over integers
OUTPUT:
a word -- the inverse of the phi function
EXAMPLES::
sage: W = Words([1, 2])
sage: W([2, 2, 2, 2, 1, 2]).phi_inv()
word: 22112122
sage: W([2, 2, 2]).phi_inv(Words([2, 3]))
word: 2233
"""
if W is None:
W = self.parent()
if self.is_empty():
return W()
v = self.parent()((self[-1],), check=False)
for i in range(self.length()-2, -1, -1):
v = v.delta_inv(W, self[i])
return v
def _phi_inv_tab(self, tab):
r"""
Specialized version of ``phi_inv()`` for long or incremental words.
TESTS::
sage: Words([1, 2])([1, 1, 2, 2])._phi_inv_tab([2])
word: 12211
"""
res = self.delta_inv(s=tab[0])
res = res[1:]
for i in range(1, len(tab)):
res = res.delta_inv(s=tab[i])
return res
def is_smooth_prefix(self):
r"""
Return ``True`` if ``self`` is the prefix of a smooth word, and ``False``
otherwise.
Let `A_k = \{1, \ldots ,k\}`, `k \geq 2`. An infinite word `w` in
`A_k^\omega` is said to be *smooth* if and only if for all positive
integers `m`, `\Delta^m(w)` is in `A_k^\omega`, where `\Delta(w)` is
the word obtained from `w` by listing the lengths of the consecutive
runs of the same letter in `w`. See for instance [1] and [2].
INPUT:
- ``self`` -- must be a word over the integers to get something other
than ``False``
OUTPUT:
boolean -- whether ``self`` is a smooth prefix or not
EXAMPLES::
sage: W = Words([1, 2])
sage: W([1, 1, 2, 2, 1, 2, 1, 1]).is_smooth_prefix()
True
sage: W([1, 2, 1, 2, 1, 2]).is_smooth_prefix()
False
REFERENCES:
- [1] S. Brlek, A. Ladouceur, A note on differentiable palindromes,
Theoret. Comput. Sci. 302 (2003) 167--178.
- [2] S. Brlek, S. Dulucq, A. Ladouceur, L. Vuillon, Combinatorial
properties of smooth infinite words, Theoret. Comput. Sci. 352
(2006) 306--317.
"""
m = self
W = self.parent()
while m.length() > 1:
m = m.delta_derivate_right()
if not all(a in W.alphabet() for a in m.letters()):
return False
return True
def letters(self):
r"""
Return the list of letters that appear in this word, listed in the
order of first appearance.
EXAMPLES::
sage: Word([0,1,1,0,1,0,0,1]).letters()
[0, 1]
sage: Word("cacao").letters()
['c', 'a', 'o']
TESTS::
sage: Word().letters()
[]
"""
seen = set()
res = []
for x in self:
if x not in seen:
res.append(x)
seen.add(x)
return res
def standard_factorization(self):
r"""
Return the standard factorization of ``self``.
The *standard factorization* of a word `w` of length greater than
`1` is the factorization `w = uv` where `v` is the longest proper
suffix of `w` that is a Lyndon word.
Note that if `w` is a Lyndon word of length greater than `1` with
standard factorization `w = uv`, then `u` and `v` are also Lyndon
words and `u < v`.
See for instance [1], [2] and [3].
INPUT:
- ``self`` -- finite word of length greater than `1`
OUTPUT:
`2`-tuple `(u, v)`
EXAMPLES::
sage: Words('01')('0010110011').standard_factorization()
(word: 001011, word: 0011)
sage: Words('123')('1223312').standard_factorization()
(word: 12233, word: 12)
sage: Word([3,2,1]).standard_factorization()
(word: 32, word: 1)
::
sage: w = Word('0010110011',alphabet='01')
sage: w.standard_factorization()
(word: 001011, word: 0011)
sage: w = Word('0010110011',alphabet='10')
sage: w.standard_factorization()
(word: 001011001, word: 1)
sage: w = Word('1223312',alphabet='123')
sage: w.standard_factorization()
(word: 12233, word: 12)
TESTS::
sage: w = Word()
sage: w.standard_factorization()
Traceback (most recent call last):
...
ValueError: Standard factorization not defined on words of
length less than 2
sage: w = Word('a')
sage: w.standard_factorization()
Traceback (most recent call last):
...
ValueError: Standard factorization not defined on words of
length less than 2
REFERENCES:
- [1] K.-T. Chen, R.H. Fox, R.C. Lyndon, Free differential calculus,
IV. The quotient groups of the lower central series, Ann. of Math.
68 (1958) 81--95.
- [2] J.-P. Duval, Factorizing words over an ordered alphabet,
J. Algorithms 4 (1983) 363--381.
- [3] M. Lothaire, Algebraic Combinatorics On Words, vol. 90 of
Encyclopedia of Mathematics and its Applications, Cambridge
University Press, U.K., 2002.
"""
selflen = self.length()
if selflen < 2:
raise ValueError("Standard factorization not defined on"
" words of length less than 2")
for l in range(1, selflen):
suff = self[l:]
if suff.is_lyndon():
return self[:l], suff
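# --- Illustrative sketch (not part of the Sage source) ----------------------
# The standard factorization on a plain string, using the naive Lyndon test
# (a word is Lyndon iff it is strictly smaller than all of its proper
# suffixes).  For illustration only; Duval's algorithm would be faster.
def _standard_factorization_sketch(w):
    def is_lyndon(s):
        return all(s < s[i:] for i in range(1, len(s)))
    for l in range(1, len(w)):
        if is_lyndon(w[l:]):
            return w[:l], w[l:]
    raise ValueError("not defined on words of length less than 2")
# Example: _standard_factorization_sketch('0010110011') == ('001011', '0011')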
def apply_permutation_to_positions(self, permutation):
r"""
Return the word obtained by permuting the positions of the letters
in ``self`` according to the permutation ``permutation``.
EXAMPLES::
sage: w = Words('abcd')('abcd')
sage: w.apply_permutation_to_positions([2,1,4,3])
word: badc
sage: u = Words('dabc')('abcd')
sage: u.apply_permutation_to_positions([2,1,4,3])
word: badc
sage: w.apply_permutation_to_positions(Permutation([2,1,4,3]))
word: badc
sage: w.apply_permutation_to_positions(PermutationGroupElement([2,1,4,3]))
word: badc
sage: Word([1,2,3,4]).apply_permutation_to_positions([3,4,2,1])
word: 3421
"""
from sage.combinat.permutation import Permutation
from sage.groups.perm_gps.permgroup_element import PermutationGroupElement
if not isinstance(permutation, Permutation):
if isinstance(permutation, PermutationGroupElement):
permutation = Permutation(permutation.domain())
else:
permutation = Permutation(permutation)
return self.parent()(permutation.action(self), check=False)
def apply_permutation_to_letters(self, permutation):
r"""
Return the word obtained by applying the permutation
``permutation`` of the alphabet of ``self`` to each letter of
``self``.
EXAMPLES::
sage: w = Words('abcd')('abcd')
sage: p = [2,1,4,3]
sage: w.apply_permutation_to_letters(p)
word: badc
sage: u = Words('dabc')('abcd')
sage: u.apply_permutation_to_letters(p)
word: dcba
sage: w.apply_permutation_to_letters(Permutation(p))
word: badc
sage: w.apply_permutation_to_letters(PermutationGroupElement(p))
word: badc
"""
from sage.combinat.permutation import Permutation
from sage.groups.perm_gps.permgroup_element import PermutationGroupElement
if not isinstance(permutation, Permutation):
if isinstance(permutation, PermutationGroupElement):
permutation = Permutation(permutation.domain())
else:
permutation = Permutation(permutation)
alphabet = self.parent().alphabet()
morphism = dict(zip(alphabet, permutation.action(alphabet)))
return self.apply_morphism(morphism)
def colored_vector(self, x=0, y=0, width='default', height=1, cmap='hsv', thickness=1, label=None):
r"""
Return a vector (Graphics object) illustrating ``self``. Each letter
is represented by a coloured rectangle.
If the parent of ``self`` is a class of words over a finite alphabet,
then each letter in the alphabet is assigned a unique colour, and
this colour will be the same every time this method is called. This
is especially useful when plotting and comparing words defined on
the same alphabet.
If the alphabet is infinite, then the letters appearing in the word
are used as the alphabet.
INPUT:
- ``x`` -- (default: ``0``) bottom left x-coordinate of the vector
- ``y`` -- (default: ``0``) bottom left y-coordinate of the vector
- ``width`` -- (default: ``'default'``) width of the vector. By default,
the width is the length of ``self``.
- ``height`` -- (default: ``1``) height of the vector
- ``thickness`` -- (default: ``1``) thickness of the contour
- ``cmap`` -- (default: ``'hsv'``) color map; for available color map names
type: ``import matplotlib.cm; list(matplotlib.cm.datad)``
- ``label`` -- string (default: ``None``) a label to add on the colored vector
OUTPUT:
Graphics
EXAMPLES::
sage: Word(range(20)).colored_vector()
Graphics object consisting of 21 graphics primitives
sage: Word(range(100)).colored_vector(0,0,10,1)
Graphics object consisting of 101 graphics primitives
sage: Words(range(100))(range(10)).colored_vector()
Graphics object consisting of 11 graphics primitives
sage: w = Word('abbabaab')
sage: w.colored_vector()
Graphics object consisting of 9 graphics primitives
sage: w.colored_vector(cmap='autumn')
Graphics object consisting of 9 graphics primitives
sage: Word(range(20)).colored_vector(label='Rainbow')
Graphics object consisting of 23 graphics primitives
When two words are defined under the same parent, same letters are
mapped to same colors::
sage: W = Words(range(20))
sage: w = W(range(20))
sage: y = W(range(10,20))
sage: y.colored_vector(y=1, x=10) + w.colored_vector()
Graphics object consisting of 32 graphics primitives
TESTS:
The empty word::
sage: Word().colored_vector()
Graphics object consisting of 1 graphics primitive
sage: Word().colored_vector(label='empty')
Graphics object consisting of 3 graphics primitives
Unknown cmap::
sage: Word(range(100)).colored_vector(cmap='jolies')
Traceback (most recent call last):
...
RuntimeError: Color map jolies not known
sage: Word(range(100)).colored_vector(cmap='__doc__')
Traceback (most recent call last):
...
RuntimeError: Color map __doc__ not known
"""
#Recognize the color map
import matplotlib.cm as cm
from matplotlib.colors import LinearSegmentedColormap as C
key_error = False
try:
mpl_cmap = cm.__dict__[cmap]
except KeyError:
key_error = True
if key_error or not isinstance(mpl_cmap, C):
possibilities = ', '.join([str(x) for x in cm.__dict__.keys() if \
isinstance(cm.__dict__[x], C)])
import sage.misc.misc
sage.misc.misc.verbose("The possible color maps include: %s"%possibilities, level=0)
raise RuntimeError("Color map %s not known"%cmap)
#Drawing the colored vector...
from sage.plot.plot import polygon,line,text
#The default width of the vector
if width == 'default':
width = self.length()
#The black frame of the vector
ymax = y + height
L = [(x,y), (x+width,y), (x+width,ymax), (x,ymax), (x,y)]
rep = line(L, rgbcolor=(0,0,0), thickness=thickness)
#The label
        if label is not None:
hl = height/2.0 # height of the label rectangle
ymax2 = ymax + hl
rep += text(str(label), (x+width/2.0, ymax + hl/2.0), rgbcolor=(1,0,0))
L = [(x,ymax), (x+width,ymax), (x+width,ymax2), (x,ymax2), (x,ymax)]
rep += line(L, rgbcolor=(0,0,0), thickness=thickness)
#base : the width of each rectangle
base = width / float(self.length()) if not self.is_empty() else None
#A colored rectangle for each letter
dim = self.parent().alphabet().cardinality()
if dim is Infinity:
ordered_alphabet = sorted(self.letters(),
key=self.parent().sortkey_letters)
dim = float(len(ordered_alphabet))
else:
ordered_alphabet = self.parent().alphabet()
dim = float(self.parent().alphabet().cardinality())
letter_to_integer_dict = dict((a,i) for (i,a) in
enumerate(ordered_alphabet))
xp = x
for a in self:
i = letter_to_integer_dict[a]
xq = xp + base
L = [(xp,y), (xq,y), (xq,ymax), (xp,ymax) ]
rgbcolor = mpl_cmap( i / dim ) [:3]
rep += polygon(L, rgbcolor = rgbcolor)
xp = xq
rep.axes(False)
return rep
def is_square(self):
r"""
Return ``True`` if ``self`` is a square, and ``False`` otherwise.
EXAMPLES::
sage: Word([1,0,0,1]).is_square()
False
sage: Word('1212').is_square()
True
sage: Word('1213').is_square()
False
sage: Word('12123').is_square()
False
sage: Word().is_square()
True
"""
if self.length() % 2 != 0:
return False
else:
l = self.length() // 2
return self[:l] == self[l:]
def is_square_free(self):
r"""
Return ``True`` if ``self`` does not contain squares, and ``False``
otherwise.
EXAMPLES::
sage: Word('12312').is_square_free()
True
sage: Word('31212').is_square_free()
False
sage: Word().is_square_free()
True
TESTS:
We make sure that :trac:`8490` is fixed::
sage: Word('11').is_square_free()
False
sage: Word('211').is_square_free()
False
sage: Word('3211').is_square_free()
False
"""
L = self.length()
if L < 2:
return True
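        # Only even-length factors can be squares, so the end index below
        # advances in steps of 2 from start+2.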
for start in range(0, L-1):
for end in range(start+2, L+1, 2):
if self[start:end].is_square():
return False
return True
def is_cube(self):
r"""
Return ``True`` if ``self`` is a cube, and ``False`` otherwise.
EXAMPLES::
sage: Word('012012012').is_cube()
True
sage: Word('01010101').is_cube()
False
sage: Word().is_cube()
True
sage: Word('012012').is_cube()
False
"""
if self.length() % 3 != 0:
return False
l = self.length() // 3
return self[:l] == self[l:2*l] == self[2*l:]
def is_cube_free(self):
r"""
Return ``True`` if ``self`` does not contain cubes, and ``False`` otherwise.
EXAMPLES::
sage: Word('12312').is_cube_free()
True
sage: Word('32221').is_cube_free()
False
sage: Word().is_cube_free()
True
TESTS:
We make sure that :trac:`8490` is fixed::
sage: Word('111').is_cube_free()
False
sage: Word('2111').is_cube_free()
False
sage: Word('32111').is_cube_free()
False
"""
L = self.length()
if L < 3:
return True
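        # Only factors whose length is a multiple of 3 can be cubes, so the
        # end index below advances in steps of 3 from start+3.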
for start in range(0, L - 2):
for end in range(start+3, L+1, 3):
if self[start:end].is_cube():
return False
return True
def to_monoid_element(self):
"""
Return ``self`` as an element of the free monoid with the same alphabet
as ``self``.
EXAMPLES::
sage: w = Word('aabb')
sage: w.to_monoid_element()
a^2*b^2
sage: W = Words('abc')
sage: w = W(w)
sage: w.to_monoid_element()
a^2*b^2
TESTS:
Check that ``w == w.to_monoid_element().to_word()``::
sage: all(w.to_monoid_element().to_word() == w for i in range(6) for w in Words('abc', i))
True
"""
from sage.monoids.free_monoid import FreeMonoid
try:
l = list(self.parent().alphabet())
except AttributeError:
l = self.letters()
M = FreeMonoid(len(l), l)
return M(self)
def is_christoffel(self):
r"""
Return ``True`` if ``self`` is a Christoffel word, and ``False`` otherwise.
The *Christoffel word* of slope `p/q` is obtained from the Cayley
graph of `\ZZ/(p+q)\ZZ` with generator `q` as follows. If `u
\rightarrow v` is an edge in the Cayley graph, then, `v = u + p
\mod{p+q}`. Let `a`,`b` be the alphabet of `w`. Label the edge
`u \rightarrow v` by `a` if `u < v` and `b` otherwise. The Christoffel
word is the word obtained by reading the edge labels along the cycle
beginning from `0`.
Equivalently, `w` is a Christoffel word iff `w` is a symmetric
non-empty word and `w[1:n-1]` is a palindrome.
See for instance [1]_ and [2]_.
INPUT:
- ``self`` -- word
OUTPUT:
boolean -- ``True`` if ``self`` is a Christoffel word,
``False`` otherwise.
EXAMPLES::
sage: Word('00100101').is_christoffel()
True
sage: Word('aab').is_christoffel()
True
sage: Word().is_christoffel()
False
sage: Word('123123123').is_christoffel()
False
sage: Word('00100').is_christoffel()
False
sage: Word('0').is_christoffel()
True
TESTS::
sage: words.LowerChristoffelWord(5,4).is_christoffel()
True
sage: words.UpperChristoffelWord(5,4).is_christoffel()
True
sage: Word('aaaaaaaaa').is_christoffel()
False
REFERENCES:
.. [1] Jean Berstel. Sturmian and episturmian words (a survey of
some recent results). In S. Bozapalidis and G. Rahonis, editors,
CAI 2007,volume 4728 of Lecture Notes in Computer Science,
pages 23-47. Springer-Verlag, 2007.
.. [2] \J. Berstel, A. Lauve, C. R., F. Saliola, Combinatorics on
words: Christoffel words and repetitions in words, CRM Monograph
Series, 27. American Mathematical Society, Providence, RI, 2009.
xii+147 pp. ISBN: 978-0-8218-4480-9
"""
if len(self) == 0 or len(self.letters()) > 2 or (self.is_palindrome() and len(self) > 1):
return False
elif self.is_symmetric() and self[1:len(self) - 1].is_palindrome():
return True
else:
return False
#######################################################################
class CallableFromListOfWords(tuple):
r"""
A class to create a callable from a list of words. The concatenation of
a list of words is obtained by creating a word from this callable.
"""
def __new__(cls, words):
r"""
TESTS::
sage: from sage.combinat.words.finite_word import CallableFromListOfWords
sage: w,u,x = Word([1,2,3]),Word([4,5]),Word([6,7,8])
sage: f = CallableFromListOfWords([w,u,x]); f
(word: 123, word: 45, word: 678)
sage: f == loads(dumps(f))
True
"""
l = []
for w in words:
from .word_infinite_datatypes import WordDatatype_callable
if isinstance(w, WordDatatype_callable) and \
isinstance(w._func, CallableFromListOfWords):
l.extend(w._func)
else:
l.append(w)
return tuple.__new__(cls, l)
def __call__(self, i):
r"""
Return the character at position ``i``.
TESTS::
sage: from sage.combinat.words.finite_word import CallableFromListOfWords
sage: w,u,x = Word([1,2,3]),Word([4,5]),Word([6,7,8])
sage: f = CallableFromListOfWords([w,u,x])
sage: [f(i) for i in range(8)]
[1, 2, 3, 4, 5, 6, 7, 8]
"""
j = i
for c in self:
if (j - c.length() < 0):
return c[j]
j -= c.length()
raise IndexError("index (=%s) out of range" % i)
class Factorization(list):
r"""
A list subclass having a nicer representation for factorization of words.
TESTS::
sage: f = sage.combinat.words.finite_word.Factorization()
sage: f == loads(dumps(f))
True
"""
def __repr__(self):
r"""
Return a string representation of the object.
TESTS::
sage: sage.combinat.words.finite_word.Factorization()
()
sage: sage.combinat.words.finite_word.Factorization([Word('ab'), Word('ba')])
(ab, ba)
"""
return '(%s)' % ', '.join(w.string_rep() for w in self)
#######################################################################
def evaluation_dict(w):
r"""
Return a dictionary keyed by the letters occurring in ``w`` with
values the number of occurrences of the letter.
INPUT:
- ``w`` -- a word
TESTS::
sage: from sage.combinat.words.finite_word import evaluation_dict
sage: evaluation_dict([2,1,4,2,3,4,2])
{1: 1, 2: 3, 3: 1, 4: 2}
sage: evaluation_dict('badbcdb')
{'a': 1, 'b': 3, 'c': 1, 'd': 2}
sage: evaluation_dict([])
{}
::
sage: evaluation_dict('1213121') # keys appear in random order
{'1': 4, '2': 2, '3': 1}
"""
d = defaultdict(int)
for a in w:
d[a] += 1
return dict(d)
| 32.52251 | 261 | 0.512122 |
bce18f6a38e67fcbf558a1e9df67a44fcca05551 | 3,444 | py | Python | tests/test_django/settings/v12.py | edavis/django-override-settings | 016a2ba44cf7132d3aeefbfeddaf201217b1d4b6 | [
"BSD-3-Clause"
] | 4 | 2015-02-06T14:50:12.000Z | 2020-05-13T07:27:08.000Z | tests/test_django/settings/v12.py | edavis/django-override-settings | 016a2ba44cf7132d3aeefbfeddaf201217b1d4b6 | [
"BSD-3-Clause"
] | null | null | null | tests/test_django/settings/v12.py | edavis/django-override-settings | 016a2ba44cf7132d3aeefbfeddaf201217b1d4b6 | [
"BSD-3-Clause"
] | 3 | 2015-10-04T23:05:24.000Z | 2020-11-23T18:47:24.000Z | # Django settings for foo project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': ':memory:', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fcz!*uhq@eng!h7-s44w0qm6n1c@(e4w1g_t@_#7_l(lsr*5zg'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'foo.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'apps.testapp',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
| 35.142857 | 122 | 0.694251 |
0352c49bfb4bcccdc512188b3249cef0fab77c14 | 4,704 | py | Python | modules/dbnd/src/dbnd/__init__.py | hugovk/dbnd | 59cd2a63a88e3bf6022bf8a4e74e6e10b183abcd | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/dbnd/__init__.py | hugovk/dbnd | 59cd2a63a88e3bf6022bf8a4e74e6e10b183abcd | [
"Apache-2.0"
] | null | null | null | modules/dbnd/src/dbnd/__init__.py | hugovk/dbnd | 59cd2a63a88e3bf6022bf8a4e74e6e10b183abcd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from dbnd._core.access import (
get_remote_engine_name,
get_task_params_defs,
get_task_params_values,
)
from dbnd._core.cli.main import (
dbnd_cmd,
dbnd_run_cmd,
dbnd_run_cmd_main,
main as dbnd_main,
)
from dbnd._core.configuration.config_path import ConfigPath
from dbnd._core.configuration.config_store import replace_section_with
from dbnd._core.configuration.config_value import default, extend, override
from dbnd._core.configuration.dbnd_config import config, config_deco
from dbnd._core.context.bootstrap import dbnd_bootstrap
from dbnd._core.context.databand_context import new_dbnd_context
from dbnd._core.current import (
current_task,
current_task_run,
dbnd_context,
get_databand_context,
get_databand_run,
)
from dbnd._core.decorator.dbnd_decorator import (
band,
data_source_pipeline,
pipeline,
task,
)
from dbnd._core.failures import dbnd_handle_errors
from dbnd._core.parameter.constants import ParameterScope
from dbnd._core.parameter.parameter_builder import data, output, parameter
from dbnd._core.parameter.parameter_definition import ParameterDefinition
from dbnd._core.plugin.dbnd_plugins import hookimpl
from dbnd._core.task.config import Config
from dbnd._core.task.data_source_task import DataSourceTask
from dbnd._core.task.pipeline_task import PipelineTask
from dbnd._core.task.python_task import PythonTask
from dbnd._core.task.task import Task
from dbnd._core.task_build import task_namespace
from dbnd._core.task_build.task_context import current
from dbnd._core.task_build.task_namespace import auto_namespace, namespace
from dbnd._core.task_build.task_registry import register_config_cls, register_task
from dbnd._core.task_ctrl.task_relations import as_task
from dbnd._core.tracking.log_data_request import LogDataRequest
from dbnd._core.tracking.metrics import (
log_artifact,
log_dataframe,
log_duration,
log_metric,
log_metrics,
log_target_operation,
)
from dbnd._core.tracking.no_tracking import dont_track
from dbnd._core.tracking.python_tracking import (
track_functions,
track_module_functions,
track_modules,
)
from dbnd._core.tracking.script_tracking_manager import (
dbnd_tracking,
dbnd_tracking_start,
dbnd_tracking_stop,
)
from dbnd._core.utils.project.project_fs import (
databand_lib_path,
databand_system_path,
project_path,
relative_path,
)
from dbnd.tasks import basics
from targets import _set_patches
from dbnd._core.configuration.environ_config import ( # isort:skip
get_dbnd_project_config,
)
get_dbnd_project_config().validate_init() # isort:skip
dbnd_config = config
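# Illustrative usage sketch (an assumption about typical use, not part of the
# exports below): the `task` decorator re-exported above wraps a plain function,
#
#     from dbnd import task
#
#     @task
#     def add_one(n: int = 0) -> int:
#         return n + 1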
__all__ = [
"hookimpl",
# context management
"new_dbnd_context",
"current",
"dbnd_context",
"current_task",
"current_task_run",
"get_databand_run",
"get_databand_context",
# inplace implementation
"dbnd_tracking",
"dbnd_tracking_start",
"dbnd_tracking_stop",
"auto_namespace",
"namespace",
"task_namespace",
"as_task",
"dont_track",
# tasks
"band",
"pipeline",
"data_source_pipeline",
"task",
"Task",
# class tasks
"Config",
"DataSourceTask",
"PipelineTask",
"PythonTask",
# parameters
"parameter",
"data",
"output",
# config
"dbnd_config",
"override",
"extend",
"config",
"config_deco",
"ConfigPath",
"ParameterDefinition",
# dbnd run cmds functions
"dbnd_main",
"dbnd_cmd",
"dbnd_run_cmd",
"dbnd_run_cmd_main",
"dbnd_handle_errors",
# metrics
"log_dataframe",
"LogDataRequest",
"log_artifact",
"log_metric",
"log_metrics",
"log_duration",
"log_target_operation",
# project paths
"project_path",
"relative_path",
"databand_lib_path",
"databand_system_path",
# bootstrap
"dbnd_bootstrap",
"_set_patches",
"track_modules",
"track_module_functions",
"track_functions",
# access helpers
"get_task_params_defs",
"get_task_params_values",
"get_remote_engine_name",
]
# validate missing __all__
# imported_vars = set(k for k in locals().keys() if not k.startswith("__"))
# print(list(imported_vars.difference(set(__all__))))
# shortcuts for useful objects
str(_set_patches) # NOQA
__version__ = "0.41.0"
__title__ = "databand"
__description__ = "Machine Learning Orchestration"
__url__ = "http://www.databand.ai/"
__uri__ = __url__
__doc__ = __description__ + " <" + __uri__ + ">"
__author__ = "Evgeny Shulman"
__email__ = "evgeny.shulman@databand.ai"
__license__ = "Commercial Licenc e"
__copyright__ = "Copyright (c) 2018 databand.ai"
| 26.27933 | 82 | 0.73852 |
dc09e6cfa5eebe2a970cd143544704b3431139f3 | 958 | py | Python | test/test_exporter.py | MattCzyr/EvacSim | d2fd191e8fb93d210b031794c274c54ebe3a1fe4 | [
"MIT"
] | 1 | 2019-11-03T16:58:28.000Z | 2019-11-03T16:58:28.000Z | test/test_exporter.py | MattCzyr/EvacuationPlanner | d2fd191e8fb93d210b031794c274c54ebe3a1fe4 | [
"MIT"
] | 12 | 2021-03-23T04:02:04.000Z | 2021-04-30T22:55:54.000Z | test/test_exporter.py | MattCzyr/EvacuationPlanner | d2fd191e8fb93d210b031794c274c54ebe3a1fe4 | [
"MIT"
] | null | null | null | import unittest
import os
import evacsim.node
import evacsim.edge
import evacsim.disaster
import evacsim.exporter
class TestExporter(unittest.TestCase):
"""Tests functionality in the exporter module. There isn't much to be tested here, so it simply tests
that a KML file with the proper name is created when the export_kml function is called."""
def test_export_kml(self):
"""Tests the export_kml function"""
nodes = {'Troy': evacsim.node.Node('Troy', 42.727453, -73.691764, 50000, 80000), 'Watervliet': evacsim.node.Node('Watervliet', 42.730389, -73.701504, 10000, 15000)}
edges = [evacsim.edge.Edge(nodes['Troy'], nodes['Watervliet'], 25, 0, 1000)]
disaster = evacsim.disaster.Disaster('Alfred')
routes = []
exp = evacsim.exporter.Exporter(nodes, edges, disaster, routes, 'test.kml')
exp.export_kml()
self.assertTrue(os.path.exists('test.kml'))
os.remove('test.kml')
| 43.545455 | 172 | 0.68476 |
a479c9062fccbba775fed24a8245c21d9ebebf6d | 192 | py | Python | 12_data_persistence/sqlalchemy_inventory_query_filter.py | lluxury/P_U_S_A | 1eb9d1fef74f9ce3618ae950f5223f598510be84 | [
"MIT"
] | null | null | null | 12_data_persistence/sqlalchemy_inventory_query_filter.py | lluxury/P_U_S_A | 1eb9d1fef74f9ce3618ae950f5223f598510be84 | [
"MIT"
] | null | null | null | 12_data_persistence/sqlalchemy_inventory_query_filter.py | lluxury/P_U_S_A | 1eb9d1fef74f9ce3618ae950f5223f598510be84 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from sqlalchemy_inventory_definition import session, OperatingSystem
for os in session.query(OperatingSystem).filter(OperatingSystem.name.like('Lin%')):
print (os)
| 27.428571 | 83 | 0.786458 |
0e8a215802959a1f51a3e288c8b58cd7535d1308 | 2,724 | py | Python | python/time-series-visualizer/time_series_visualizer.py | dbgeek/freeCodeCamp | 39a78ef914c17b751cebf46f0c28e3f75aaef2cb | [
"MIT"
] | null | null | null | python/time-series-visualizer/time_series_visualizer.py | dbgeek/freeCodeCamp | 39a78ef914c17b751cebf46f0c28e3f75aaef2cb | [
"MIT"
] | 12 | 2021-03-06T18:28:14.000Z | 2022-02-27T07:23:02.000Z | python/time-series-visualizer/time_series_visualizer.py | dbgeek/freeCodeCamp | 39a78ef914c17b751cebf46f0c28e3f75aaef2cb | [
"MIT"
] | null | null | null | import datetime
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
# Import data (Make sure to parse dates. Consider setting index column to 'date'.)
df = pd.read_csv('fcc-forum-pageviews.csv', index_col=0, parse_dates=[0])
# Clean data
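# Keep only the middle 95% of daily page views: values below the 2.5th or above
# the 97.5th percentile are treated as outliers and dropped.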
df = df.loc[
(df['value'] >= df['value'].quantile(0.025)) &
(df['value'] <= df['value'].quantile(0.975))
]
def draw_line_plot():
# Draw line plot
start_date = df.index[0].strftime("%-m/%Y")
end_date = df.index[-1].strftime("%-m/%Y")
fig, ax1 = plt.subplots(figsize=(15, 5))
plt.plot(df.index, df["value"], color="red")
ax1.set_ylabel('Page Views')
ax1.set_xlabel('Date')
ax1.set_title("Daily freeCodeCamp Forum Page Views {}-{}".format(start_date, end_date))
# Save image and return fig (don't change this part)
fig.savefig('line_plot.png')
return fig
def draw_bar_plot():
# Copy and modify data for monthly bar plot
df_bar = df.copy()
df_bar = df_bar.groupby(pd.Grouper(freq='M')).mean()
df_bar['YEAR'] = df_bar.index.strftime("%Y")
df_bar['MONTH'] = df_bar.index.strftime("%B")
months_sorted =[datetime.date(2000, m, 1).strftime('%B') for m in range(1, 13)]
# Draw bar plot
fig, ax1 = plt.subplots(figsize=(10, 10))
sns.barplot(x="YEAR", y="value", hue="MONTH",
data=df_bar, palette="bright", hue_order=months_sorted)
ax1.legend(loc='upper left', title='Months')
ax1.set_ylabel('Average Page Views')
ax1.set_xlabel('Years')
ax1.set_xticklabels(ax1.get_xticklabels(), rotation=90)
# Save image and return fig (don't change this part)
fig.savefig('bar_plot.png')
return fig
def draw_box_plot():
# Prepare data for box plots (this part is done!)
df_box = df.copy()
df_box.reset_index(inplace=True)
df_box['year'] = [d.year for d in df_box.date]
df_box['month'] = [d.strftime('%b') for d in df_box.date]
months_sorted =[datetime.date(2000, m, 1).strftime('%b') for m in range(1, 13)]
# Draw box plots (using Seaborn)
plot_objects = plt.subplots(nrows=1, ncols=2, figsize=(25, 10))
fig, ((ax1, ax2)) = plot_objects
sns.boxplot(x="year", y="value", data=df_box, ax=ax1)
sns.boxplot(x="month", y="value", data=df_box, ax=ax2, order=months_sorted)
ax1.set_ylabel('Page Views')
ax1.set_xlabel('Year')
ax2.set_ylabel('Page Views')
ax2.set_xlabel('Month')
ax1.set_title("Year-wise Box Plot (Trend)")
ax2.set_title("Month-wise Box Plot (Seasonality)")
# Save image and return fig (don't change this part)
fig.savefig('box_plot.png')
return fig
| 31.674419 | 91 | 0.659325 |
6e9334996100f25daaf534f5c27afba8272b04c3 | 3,775 | py | Python | pandas/tests/dtypes/test_generic.py | henighan/pandas | 45d8d77f27cf0dbc8cefe932f8fb64f6982b9527 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/dtypes/test_generic.py | henighan/pandas | 45d8d77f27cf0dbc8cefe932f8fb64f6982b9527 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/dtypes/test_generic.py | henighan/pandas | 45d8d77f27cf0dbc8cefe932f8fb64f6982b9527 | [
"BSD-3-Clause"
] | null | null | null | from warnings import catch_warnings
import numpy as np
from pandas.core.dtypes import generic as gt
import pandas as pd
import pandas._testing as tm
class TestABCClasses:
tuples = [[1, 2, 2], ["red", "blue", "red"]]
multi_index = pd.MultiIndex.from_arrays(tuples, names=("number", "color"))
datetime_index = pd.to_datetime(["2000/1/1", "2010/1/1"])
timedelta_index = pd.to_timedelta(np.arange(5), unit="s")
period_index = pd.period_range("2000/1/1", "2010/1/1/", freq="M")
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index)
sparse_array = pd.SparseArray(np.random.randn(10))
datetime_array = pd.core.arrays.DatetimeArray(datetime_index)
timedelta_array = pd.core.arrays.TimedeltaArray(timedelta_index)
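    # The gt.ABC* helpers are lightweight "abstract" types pandas uses for
    # isinstance checks without importing the concrete classes; the fixtures
    # above provide one concrete instance of each kind for the checks below.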
def test_abc_types(self):
assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(["a", "b", "c"]), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period("2012", freq="A-DEC"), gt.ABCPeriod)
assert isinstance(pd.DateOffset(), gt.ABCDateOffset)
assert isinstance(pd.Period("2012", freq="A-DEC").freq, gt.ABCDateOffset)
assert not isinstance(pd.Period("2012", freq="A-DEC"), gt.ABCDateOffset)
assert isinstance(pd.Interval(0, 1.5), gt.ABCInterval)
assert not isinstance(pd.Period("2012", freq="A-DEC"), gt.ABCInterval)
assert isinstance(self.datetime_array, gt.ABCDatetimeArray)
assert not isinstance(self.datetime_index, gt.ABCDatetimeArray)
assert isinstance(self.timedelta_array, gt.ABCTimedeltaArray)
assert not isinstance(self.timedelta_index, gt.ABCTimedeltaArray)
def test_setattr_warnings():
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {
"one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),
"two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),
}
df = pd.DataFrame(d)
with catch_warnings(record=True) as w:
# successfully add new column
# this should not raise a warning
df["three"] = df.two + 1
assert len(w) == 0
assert df.three.sum() > df.two.sum()
with catch_warnings(record=True) as w:
# successfully modify column in place
# this should not raise a warning
df.one += 1
assert len(w) == 0
assert df.one.iloc[0] == 2
with catch_warnings(record=True) as w:
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
| 43.390805 | 81 | 0.654305 |
1ee500f09b0932da613b572cb04fd63908aa3185 | 3,769 | py | Python | muted/config/config.py | st093186/mute-master | 9247e422e543d9ee96d4df9133bed2cbed62ddd0 | [
"MIT"
] | null | null | null | muted/config/config.py | st093186/mute-master | 9247e422e543d9ee96d4df9133bed2cbed62ddd0 | [
"MIT"
] | null | null | null | muted/config/config.py | st093186/mute-master | 9247e422e543d9ee96d4df9133bed2cbed62ddd0 | [
"MIT"
] | null | null | null |
from __future__ import annotations
import json
from pathlib import Path
class Config:
_instance: Config = None
def __init__(self, **kwargs):
self._kwargs = kwargs
@classmethod
def instance(cls) -> Config:
if not cls._instance:
path = Path(f'.muted/muted.json')
if path.is_file():
with path.open() as fin:
cls._instance = Config(**json.load(fin))
else:
cls._instance = Config(
**{
'socket': {
'ip': '127.0.0.1',
'port': 4004
},
'root': {
'data': './.muted/data',
'log': './.muted/log'
},
'log': {
'config': 'config.json',
'file': 'muted.log'
},
'data': {
'brief': 'brief',
'desc': 'desc',
'exit': 'exit',
'genus': 'genus',
'name': 'name',
'role': 'role',
'room': 'room'
}
}
)
return cls._instance
@property
def IP(self) -> str:
return self._kwargs['socket']['ip']
@property
def PORT(self) -> int:
return self._kwargs['socket']['port']
@property
def LOG_CONFIG(self) -> str:
return (
f'{self._kwargs["root"]["log"]}/'
f'{self._kwargs["log"]["config"]}'
)
@property
def LOG_FILE(self) -> str:
return (
f'{self._kwargs["root"]["log"]}/'
f'{self._kwargs["log"]["file"]}'
)
@property
def BAGGAGE(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["baggage"]}'
)
@property
def BRIEF(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["brief"]}'
)
@property
def DESCRIPTION(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["desc"]}'
)
@property
def EXIT(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["exit"]}'
)
@property
def GENUS(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["genus"]}'
)
@property
def LEVEL(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["level"]}'
)
@property
def NAME(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["name"]}'
)
@property
def NPC(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["npc"]}'
)
@property
def PASSER(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["passer"]}'
)
@property
def ROLE(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["role"]}'
)
@property
def ROOM(self) -> str:
return (
f'{self._kwargs["root"]["data"]}/'
f'{self._kwargs["data"]["room"]}'
)
CONFIG = Config.instance()
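# With the defaults above, e.g. CONFIG.ROOM resolves to './.muted/data/room'
# and CONFIG.LOG_FILE to './.muted/log/muted.log'.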
# config.py
| 24.633987 | 60 | 0.392677 |
98b7f994bc9f913b00c88f21a6e255ef0db3cf34 | 2,424 | py | Python | userbot/plugins/fmute.py | Doom098/userbot | 11f0225a75241ab9492b1c435414c77de287b8a6 | [
"MIT"
] | 12 | 2022-01-06T19:52:48.000Z | 2022-03-06T09:05:08.000Z | userbot/plugins/fmute.py | Doom098/userbot | 11f0225a75241ab9492b1c435414c77de287b8a6 | [
"MIT"
] | null | null | null | userbot/plugins/fmute.py | Doom098/userbot | 11f0225a75241ab9492b1c435414c77de287b8a6 | [
"MIT"
] | 64 | 2022-01-06T19:55:15.000Z | 2022-03-29T21:03:01.000Z | import asyncio
from userbot.plugins.sql_helper.mute_sql import is_muted, mute, unmute
from userbot.utils import admin_cmd
# @command(outgoing=True, pattern=r"^.gmute ?(\d+)?")
@borg.on(admin_cmd(pattern=r"fmute ?(\d+)?"))
async def startgmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit("Putting Dick🍆 💦 In Son mouth!!")
await asyncio.sleep(3)
private = True
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit(
"Please reply to a user or add their into the command to fmute them."
)
event.chat_id
await event.get_chat()
if is_muted(userid, "fmute"):
return await event.edit("This user is already fmuted")
try:
mute(userid, "fmute")
except Exception as e:
await event.edit("Error occured!\nError is " + str(e))
else:
await event.edit("Son Can't speek now.... Filled His Mouth With Cum😉")
# @command(outgoing=True, pattern=r"^.ungmute ?(\d+)?")
@borg.on(admin_cmd(pattern=r"unfmute ?(\d+)?"))
async def endgmute(event):
private = False
if event.fwd_from:
return
elif event.is_private:
await event.edit(
"Taking Out Dick from Son mouth....\n\n Today Sex Done😁 "
)
await asyncio.sleep(3)
private = True
reply = await event.get_reply_message()
if event.pattern_match.group(1) is not None:
userid = event.pattern_match.group(1)
elif reply is not None:
userid = reply.sender_id
elif private is True:
userid = event.chat_id
else:
return await event.edit(
"Please reply to a user or add their into the command to ungmute them."
)
event.chat_id
if not is_muted(userid, "fmute"):
return await event.edit("This user is not gmuted")
try:
unmute(userid, "fmute")
except Exception as e:
await event.edit("Error occured!\nError is " + str(e))
else:
await event.edit("Son Feeling Good..... Now speak🍆🍆")
@command(incoming=True)
async def watcher(event):
if is_muted(event.sender_id, "fmute"):
await event.delete()
| 31.076923 | 83 | 0.62665 |
9166db7ca237d853b84ea08afdd957773b2f8f83 | 739 | py | Python | pypy/translator/cli/test/test_float.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | pypy/translator/cli/test/test_float.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | pypy/translator/cli/test/test_float.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
] | null | null | null | import py
from pypy.translator.cli.test.runtest import CliTest
from pypy.rpython.test.test_rfloat import BaseTestRfloat
class TestCliFloat(CliTest, BaseTestRfloat):
inf = 'Infinity'
minus_inf = '-Infinity'
nan = 'NaN'
def test_parse_float(self):
ex = ['', ' ', '0', '1', '-1.5', '1.5E2', '2.5e-1', ' 0 ', '?']
def fn(i):
s = ex[i]
try:
return float(s)
except ValueError:
return -999.0
for i in range(len(ex)):
expected = fn(i)
res = self.interpret(fn, [i])
assert res == expected
def test_r_singlefloat(self):
py.test.skip("not implemented: single-precision floats")
| 27.37037 | 74 | 0.537212 |
d6d6732aa091ed2da9ad8809f8be27d92fc0ee2e | 1,995 | py | Python | tests/cli/test_wait.py | stsievert/htmap | 70509481299edce1b38fb5cf7cf10393b6bd7392 | [
"Apache-2.0"
] | null | null | null | tests/cli/test_wait.py | stsievert/htmap | 70509481299edce1b38fb5cf7cf10393b6bd7392 | [
"Apache-2.0"
] | null | null | null | tests/cli/test_wait.py | stsievert/htmap | 70509481299edce1b38fb5cf7cf10393b6bd7392 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import htmap
def test_map_is_done_after_wait(cli):
m = htmap.map(str, range(1))
result = cli(['wait', m.tag])
assert result.exit_code == 0
assert m.is_done
def test_maps_are_done_after_wait(cli):
maps = [
htmap.map(str, range(1)),
htmap.map(str, range(1)),
]
result = cli(['wait', *(m.tag for m in maps)])
assert result.exit_code == 0
assert all(m.is_done for m in maps)
def test_maps_are_done_after_wait_using_all(cli):
maps = [
htmap.map(str, range(1)),
htmap.map(str, range(1)),
]
result = cli(['wait', '--all'])
assert result.exit_code == 0
assert all(m.is_done for m in maps)
def test_maps_wait_message_has_all_tags(cli):
maps = [
htmap.map(str, range(1)),
htmap.map(str, range(1)),
]
result = cli(['wait', *(m.tag for m in maps)])
assert result.exit_code == 0
assert all(m.tag in result.output for m in maps)
def test_maps_wait_message_has_all_tags_using_all(cli):
maps = [
htmap.map(str, range(1)),
htmap.map(str, range(1)),
]
result = cli(['wait', '--all'])
assert result.exit_code == 0
assert all(m.tag in result.output for m in maps)
def test_can_wait_with_no_maps(cli):
result = cli(['wait', '--all'])
assert result.exit_code == 0
| 24.329268 | 74 | 0.661654 |
41e1817354d182c76f034d4bd2e6ded328f64db2 | 601 | py | Python | Hackerrank_problems/Modified Kaprekar Numbers/solution.py | KAHund/CompetitiveCode | 6ed211a2f795569f5c2f18c2f660520d99d41ca0 | [
"MIT"
] | 165 | 2020-10-03T08:01:11.000Z | 2022-03-31T02:42:08.000Z | Hackerrank_problems/Modified Kaprekar Numbers/solution.py | KAHund/CompetitiveCode | 6ed211a2f795569f5c2f18c2f660520d99d41ca0 | [
"MIT"
] | 383 | 2020-10-03T07:39:11.000Z | 2021-11-20T07:06:35.000Z | Hackerrank_problems/Modified Kaprekar Numbers/solution.py | KAHund/CompetitiveCode | 6ed211a2f795569f5c2f18c2f660520d99d41ca0 | [
"MIT"
] | 380 | 2020-10-03T08:05:04.000Z | 2022-03-19T06:56:59.000Z | def check(i):
sq = str(i**2) # squaring the input
le = len(str(i)) # calculating the length
r = sq[-le:] # extracting the right hand part
l = sq[:-le] or '0' # extracting the left hand part
return sum(map(int,(l,r)))==i # sum the right and left part and checking if equals to the input number
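# Worked example: 45**2 = 2025 -> right part '25' + left part '20' = 45, so 45 qualifies;
# 100**2 = 10000 -> right part '000' (=0) + left part '10' = 10 != 100, so 100 does not.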
def kaprekarNumbers(p, q):
return [i for i in range(p,q+1) if check(i)]
p = int(input())
q = int(input())
print(*kaprekarNumbers(p, q) or ["INVALID RANGE"]) | 46.230769 | 157 | 0.50416 |
da217aa3906f14cd12d0b4dc272adc542e5ec906 | 2,093 | py | Python | Flv/utils.py | wuli133144/video_parse | 0233922b20a2cd19144173ae752b070276bbc498 | [
"Unlicense"
] | null | null | null | Flv/utils.py | wuli133144/video_parse | 0233922b20a2cd19144173ae752b070276bbc498 | [
"Unlicense"
] | null | null | null | Flv/utils.py | wuli133144/video_parse | 0233922b20a2cd19144173ae752b070276bbc498 | [
"Unlicense"
] | null | null | null |
FLV_HEADER_SIZE =9
FLV_TAG_SIZE =11
FLV_PRE_LEN =4
FLV_FORMAT_SCRIPT =18
FLV_FORMAT_AUDEO =8
FLV_FORMAT_VIDEO =9
#############audio#############################
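# The helpers below decode the fields of an FLV audio tag header. Per the FLV
# specification, the first byte of an audio tag body packs:
#   SoundFormat (4 bits) | SoundRate (2 bits) | SoundSize (1 bit) | SoundType (1 bit)
# Minimal decoding sketch (assumes `first_byte` holds that byte as an int):
#   get_audio_type((first_byte >> 4) & 0x0F)
#   get_audio_samplerate((first_byte >> 2) & 0x03)
#   get_audio_exactly((first_byte >> 1) & 0x01)
#   get_audio_class(first_byte & 0x01)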
def get_audio_type(type):
if isinstance(type,int) is False:
return None
if type ==10:
return "AAC"
elif type ==2:
return "MP3"
elif type == 0:
return "Linear PCM,platform endian"
elif type ==1:
return "ADPCM"
elif type == 3:
return " Linear PCM,little endian"
elif type ==4:
return "Nellymoser 16-kHz mono"
elif type ==5:
return "Nellymoser 8-kHz mono"
elif type == 6:
return " Nellymoser"
elif type ==7:
return "G.711 A-law logarithmic PCM"
elif type ==8:
return "G.711 mu-law logarithmic PCM"
elif type == 9:
return "reserved"
elif type ==14:
return " MP3 8-Khz"
elif type == 15:
return " Device-specific sound"
pass
def get_audio_samplerate(rate):
    if isinstance(rate, int) is False:
return None
if rate == 0:
return 5.5
elif rate == 1:
return 11
elif rate == 2:
return 22
elif rate == 3:
return 44
def get_audio_exactly(rate):
    if isinstance(rate, int) is False:
return None
if rate == 0:
return 8
elif rate == 1:
return 16
def get_audio_class(cs):
    if isinstance(cs, int) is False:
return None
if cs == 0:
return "sndMono"
elif cs == 1:
return "sndStereo"
##############################################
def get_video_type(type):
if isinstance(type, int) is False:
return None
if type == 1:
return " keyframe (for AVC,a seekable frame)"
elif type == 2:
return " inter frame (for AVC,a nonseekable frame)"
elif type == 3:
return " disposable inter frame (H.263 only)"
elif type == 4:
return " generated keyframe (reserved for server use)"
elif type == 5:
return " video info/command frame" | 22.505376 | 62 | 0.540373 |
d15f23e8b6f3e1f4e09dc73768f8ef98a1e5f6b1 | 12,187 | py | Python | openslides/core/config_variables.py | ApolloLV/OpenSlides | ba3c5e07f7d16133a74fc6a57070f074655aa71c | [
"MIT"
] | null | null | null | openslides/core/config_variables.py | ApolloLV/OpenSlides | ba3c5e07f7d16133a74fc6a57070f074655aa71c | [
"MIT"
] | null | null | null | openslides/core/config_variables.py | ApolloLV/OpenSlides | ba3c5e07f7d16133a74fc6a57070f074655aa71c | [
"MIT"
] | null | null | null | import uuid
from django.core.validators import MaxLengthValidator
from openslides.core.config import ConfigVariable
def get_config_variables():
"""
Generator which yields all config variables of this app.
There are two main groups: 'General' and 'Projector'. The group 'General'
has subgroups. The generator has to be evaluated during app loading
(see apps.py).
"""
yield ConfigVariable(
name="general_event_name",
default_value="OpenSlides",
label="Event name",
weight=110,
subgroup="Event",
validators=(MaxLengthValidator(100),),
)
yield ConfigVariable(
name="general_event_description",
default_value="Presentation and assembly system",
label="Short description of event",
weight=115,
subgroup="Event",
validators=(MaxLengthValidator(100),),
)
yield ConfigVariable(
name="general_event_date",
default_value="",
label="Event date",
weight=120,
subgroup="Event",
)
yield ConfigVariable(
name="general_event_location",
default_value="",
label="Event location",
weight=125,
subgroup="Event",
)
yield ConfigVariable(
name="general_event_legal_notice",
default_value='<a href="http://www.openslides.org">OpenSlides</a> is a '
"free web based presentation and assembly system for "
"visualizing and controlling agenda, motions and "
"elections of an assembly.",
input_type="markupText",
label="Legal notice",
weight=131,
subgroup="Event",
hidden=True,
)
yield ConfigVariable(
name="general_event_privacy_policy",
default_value="",
input_type="markupText",
label="Privacy policy",
weight=132,
subgroup="Event",
hidden=True,
)
yield ConfigVariable(
name="general_event_welcome_title",
default_value="Welcome to OpenSlides",
label="Front page title",
weight=133,
subgroup="Event",
hidden=True,
)
yield ConfigVariable(
name="general_event_welcome_text",
default_value="[Space for your welcome text.]",
input_type="markupText",
label="Front page text",
weight=134,
subgroup="Event",
hidden=True,
)
# Live conference
yield ConfigVariable(
name="general_system_conference_show",
default_value=False,
input_type="boolean",
label="Show live conference window",
help_text="Server settings required to activate Jitsi Meet integration.",
weight=140,
subgroup="Live conference",
)
yield ConfigVariable(
name="general_system_conference_auto_connect",
default_value=False,
input_type="boolean",
label="Connect all users to live conference automatically",
help_text="Server settings required to activate Jitsi Meet integration.",
weight=141,
subgroup="Live conference",
)
yield ConfigVariable(
name="general_system_conference_los_restriction",
default_value=False,
input_type="boolean",
label="Allow only current speakers and list of speakers managers to enter the live conference",
help_text="Server settings required to activate Jitsi Meet integration.",
weight=142,
subgroup="Live conference",
)
yield ConfigVariable(
name="general_system_stream_url",
default_value="",
label="Livestream url",
help_text="Remove URL to deactivate livestream. Check extra group permission to see livestream.",
weight=143,
subgroup="Live conference",
)
# General System
yield ConfigVariable(
name="general_system_enable_anonymous",
default_value=False,
input_type="boolean",
label="Allow access for anonymous guest users",
weight=150,
subgroup="System",
)
yield ConfigVariable(
name="general_login_info_text",
default_value="",
label="Show this text on the login page",
weight=152,
subgroup="System",
)
yield ConfigVariable(
name="openslides_theme",
default_value="openslides-default-light",
input_type="choice",
label="OpenSlides Theme",
choices=(
{
"value": "openslides-default-light-theme",
"display_name": "OpenSlides Default",
},
{
"value": "openslides-default-dark-theme",
"display_name": "OpenSlides Dark",
},
{"value": "openslides-red-light-theme", "display_name": "OpenSlides Red"},
{
"value": "openslides-red-dark-theme",
"display_name": "OpenSlides Red Dark",
},
{
"value": "openslides-green-light-theme",
"display_name": "OpenSlides Green",
},
{
"value": "openslides-green-dark-theme",
"display_name": "OpenSlides Green Dark",
},
{
"value": "openslides-solarized-dark-theme",
"display_name": "OpenSlides Solarized",
},
),
weight=154,
subgroup="System",
)
# General export settings
yield ConfigVariable(
name="general_csv_separator",
default_value=",",
label="Separator used for all csv exports and examples",
weight=160,
subgroup="Export",
)
yield ConfigVariable(
name="general_csv_encoding",
default_value="utf-8",
input_type="choice",
label="Default encoding for all csv exports",
choices=(
{"value": "utf-8", "display_name": "UTF-8"},
{"value": "iso-8859-15", "display_name": "ISO-8859-15"},
),
weight=162,
subgroup="Export",
)
yield ConfigVariable(
name="general_export_pdf_pagenumber_alignment",
default_value="center",
input_type="choice",
label="Page number alignment in PDF",
choices=(
{"value": "left", "display_name": "Left"},
{"value": "center", "display_name": "Center"},
{"value": "right", "display_name": "Right"},
),
weight=164,
subgroup="Export",
)
yield ConfigVariable(
name="general_export_pdf_fontsize",
default_value="10",
input_type="choice",
label="Standard font size in PDF",
choices=(
{"value": "10", "display_name": "10"},
{"value": "11", "display_name": "11"},
{"value": "12", "display_name": "12"},
),
weight=166,
subgroup="Export",
)
yield ConfigVariable(
name="general_export_pdf_pagesize",
default_value="A4",
input_type="choice",
label="Standard page size in PDF",
choices=(
{"value": "A4", "display_name": "DIN A4"},
{"value": "A5", "display_name": "DIN A5"},
),
weight=168,
subgroup="Export",
)
# Logos
yield ConfigVariable(
name="logos_available",
default_value=[
"logo_projector_main",
"logo_projector_header",
"logo_web_header",
"logo_pdf_header_L",
"logo_pdf_header_R",
"logo_pdf_footer_L",
"logo_pdf_footer_R",
"logo_pdf_ballot_paper",
],
weight=300,
group="Logo",
hidden=True,
)
yield ConfigVariable(
name="logo_projector_main",
default_value={"display_name": "Projector logo", "path": ""},
input_type="static",
weight=301,
group="Logo",
hidden=True,
)
yield ConfigVariable(
name="logo_projector_header",
default_value={"display_name": "Projector header image", "path": ""},
input_type="static",
weight=302,
group="Logo",
hidden=True,
)
yield ConfigVariable(
name="logo_web_header",
default_value={"display_name": "Web interface header logo", "path": ""},
input_type="static",
weight=303,
group="Logo",
hidden=True,
)
# PDF logos
yield ConfigVariable(
name="logo_pdf_header_L",
default_value={"display_name": "PDF header logo (left)", "path": ""},
input_type="static",
weight=310,
group="Logo",
hidden=True,
)
yield ConfigVariable(
name="logo_pdf_header_R",
default_value={"display_name": "PDF header logo (right)", "path": ""},
input_type="static",
weight=311,
group="Logo",
hidden=True,
)
yield ConfigVariable(
name="logo_pdf_footer_L",
default_value={"display_name": "PDF footer logo (left)", "path": ""},
input_type="static",
weight=312,
group="Logo",
hidden=True,
)
yield ConfigVariable(
name="logo_pdf_footer_R",
default_value={"display_name": "PDF footer logo (right)", "path": ""},
input_type="static",
weight=313,
group="Logo",
hidden=True,
)
yield ConfigVariable(
name="logo_pdf_ballot_paper",
default_value={"display_name": "PDF ballot paper logo", "path": ""},
input_type="static",
weight=314,
group="Logo",
hidden=True,
)
# Fonts
yield ConfigVariable(
name="fonts_available",
default_value=[
"font_regular",
"font_italic",
"font_bold",
"font_bold_italic",
"font_monospace",
],
weight=320,
group="Font",
hidden=True,
)
yield ConfigVariable(
name="font_regular",
default_value={
"display_name": "Font regular",
"default": "assets/fonts/fira-sans-latin-400.woff",
"path": "",
},
input_type="static",
weight=321,
group="Font",
hidden=True,
)
yield ConfigVariable(
name="font_italic",
default_value={
"display_name": "Font italic",
"default": "assets/fonts/fira-sans-latin-400italic.woff",
"path": "",
},
input_type="static",
weight=321,
group="Font",
hidden=True,
)
yield ConfigVariable(
name="font_bold",
default_value={
"display_name": "Font bold",
"default": "assets/fonts/fira-sans-latin-500.woff",
"path": "",
},
input_type="static",
weight=321,
group="Font",
hidden=True,
)
yield ConfigVariable(
name="font_bold_italic",
default_value={
"display_name": "Font bold italic",
"default": "assets/fonts/fira-sans-latin-500italic.woff",
"path": "",
},
input_type="static",
weight=321,
group="Font",
hidden=True,
)
yield ConfigVariable(
name="font_monospace",
default_value={
"display_name": "Font monospace",
"default": "assets/fonts/roboto-condensed-bold.woff",
"path": "",
},
input_type="static",
weight=321,
group="Font",
hidden=True,
)
# Custom translations
yield ConfigVariable(
name="translations",
label="Custom translations",
default_value=[],
input_type="translations",
weight=1000,
group="Custom translations",
)
# Config version and DB id
yield ConfigVariable(
name="config_version",
input_type="integer",
default_value=1,
group="Version",
hidden=True,
)
yield ConfigVariable(
name="db_id",
input_type="string",
default_value=uuid.uuid4().hex,
group="Version",
hidden=True,
)
| 27.203125 | 105 | 0.556084 |
4e0fc622dcd57dc10e93d132d669d009da85412f | 2,986 | py | Python | app.py | cs01/python-online-disassembler | bf01605b591d09f4546003ced1a6c72d0e453b39 | [
"MIT"
] | 5 | 2018-08-11T06:49:59.000Z | 2020-02-28T13:29:52.000Z | app.py | cs01/python-online-disassembler | bf01605b591d09f4546003ced1a6c72d0e453b39 | [
"MIT"
] | null | null | null | app.py | cs01/python-online-disassembler | bf01605b591d09f4546003ced1a6c72d0e453b39 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import json
import base64
import lzma
import black
import dis
from flask import (
Flask,
render_template,
request,
send_from_directory,
render_template_string,
redirect,
url_for,
)
app = Flask(__name__)
def compress_state(data):
compressed = lzma.compress(json.dumps(data).encode("utf-8"))
return base64.urlsafe_b64encode(compressed).decode("utf-8")
def decompress_state(state):
compressed = base64.urlsafe_b64decode(state)
return json.loads(lzma.decompress(compressed))
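# The two helpers above let the whole editor state live in the URL: the state
# dict is JSON-encoded, LZMA-compressed and URL-safe base64 encoded, e.g.
#   state = compress_state({"sc": "print(1)", "ll": 60})
#   decompress_state(state)["ll"]  # -> 60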
def format_code(source, line_length, skip_string_normalization, py36, pyi):
try:
mode = black.FileMode.from_configuration(
py36=py36,
pyi=pyi,
skip_string_normalization=skip_string_normalization)
formatted = black.format_str(
source, line_length=line_length, mode=mode)
except Exception as exc:
formatted = exc
return formatted
@app.route("/favicon.ico")
def favicon():
return send_from_directory(
os.path.join(app.root_path, "static"), "favicon.ico")
@app.route("/", methods=["POST", "GET"])
def index():
if request.method == "POST":
source = request.form["source"]
line_length = int(request.form["line_length"])
skip_string_normalization = bool(
request.form.get("skip_string_normalization"))
py36 = bool(request.form.get("py36"))
pyi = bool(request.form.get("pyi"))
state = compress_state({
"sc": source,
"ll": line_length,
"ssn": skip_string_normalization,
"py36": py36,
"pyi": pyi,
})
return redirect(url_for(".index", state=state))
state = request.args.get("state")
if not state:
source = render_template("source.py")
line_length = 60
skip_string_normalization = False
py36 = False
pyi = False
state = compress_state({
"sc": source,
"ll": line_length,
"ssn": skip_string_normalization,
"py36": py36,
"pyi": pyi,
})
return redirect(url_for(".index", state=state))
state = decompress_state(state)
source = state.get("sc")
line_length = state.get("ll")
skip_string_normalization = state.get("ssn")
py36 = state.get("py36")
pyi = state.get("pyi")
try:
bytecode = dis.code_info(source) + "\n\n\n" + dis.Bytecode(
source).dis()
except SyntaxError as e:
bytecode = str(e)
data = {
"source_code": source,
"bytecode": bytecode,
"options": {
"line_length": line_length,
"skip_string_normalization": skip_string_normalization,
"py36": py36,
"pyi": pyi,
},
"black_version": black.__version__,
}
return render_template("index.html", **data)
if __name__ == "__main__":
app.run(debug=True)
| 24.677686 | 75 | 0.600469 |
f019dbe9fff92b432f48617630248843c0c59e53 | 11,065 | py | Python | version_0.2.0/static_tpo_slider.py | StreamAlpha/tpo_project | 08c6dfee75684be2539a53433b79525ed986fbba | [
"MIT"
] | 69 | 2020-08-02T03:00:34.000Z | 2022-03-17T20:42:28.000Z | version_0.2.0/static_tpo_slider.py | anojangra/tpo_project | 08c6dfee75684be2539a53433b79525ed986fbba | [
"MIT"
] | 5 | 2020-08-02T10:53:00.000Z | 2021-09-15T07:22:05.000Z | version_0.2.0/static_tpo_slider.py | anojangra/tpo_project | 08c6dfee75684be2539a53433b79525ed986fbba | [
"MIT"
] | 29 | 2020-08-01T22:26:41.000Z | 2022-03-05T08:21:01.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 2 07:02:43 2020
@author: alex1
twitter.com/beinghorizontal
"""
import pandas as pd
import plotly.graph_objects as go
from tpo_helper2 import get_ticksize, abc, get_mean, get_rf, get_context, get_dayrank, get_ibrank
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
app = dash.Dash(__name__)
display = 5
freq = 30
avglen = 10 # num days mean to get values
mode = 'tpo' # for volume --> 'vol'
dfhist = pd.read_csv(r'C:\Users\alex1\Dropbox\scripts\tpo_v2\history.txt')
dfhist.iloc[:, 2:] = dfhist.iloc[:, 2:].apply(pd.to_numeric)
def datetime(data):
dfhist = data.copy()
dfhist['datetime2'] = pd.to_datetime(dfhist['datetime'], format='%Y%m%d %H:%M:%S')
dfhist = dfhist.set_index(dfhist['datetime2'], drop=True, inplace=False)
return dfhist
dfhist = datetime(dfhist)
ticksz = get_ticksize(dfhist, freq=freq)
symbol = dfhist.symbol[0]
mean_val = get_mean(dfhist, avglen=avglen, freq=freq)
trading_hr = mean_val['session_hr']
# !!! get rotational factor again for 30 min resampled data
dfhist = get_rf(dfhist.copy())
dfhist = dfhist.resample(str(freq) + 'min').agg({'symbol': 'last', 'datetime': 'first', 'Open': 'first', 'High': 'max',
'Low': 'min', 'Close': 'last', 'Volume': 'sum', 'rf': 'sum'})
dfhist = dfhist.dropna()
dfnflist = [group[1] for group in dfhist.groupby(dfhist.index.date)]
dates = []
for d in range(0, len(dfnflist)):
dates.append(dfnflist[d].datetime[-1])
date_mark = {str(h): {'label': str(h), 'style': {'color': 'blue', 'fontsize': '4', 'text-orientation': 'upright'}}
for h in range(0, len(dates))}
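# date_mark maps each session index to a slider label; the RangeSlider below
# uses these marks so the user can pick which span of trading days to display.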
app.layout = html.Div([
# adding a plot
dcc.Graph(id = 'beinghorizontal'),
# range slider
html.P([
html.Label("Days to display"),
dcc.RangeSlider(id = 'slider',
pushable=display,
marks = date_mark,
min = 0,
max = len(dates)-1,
step = None,
value = [len(dates)-(display+1), len(dates)-1])
], style = {'width' : '80%',
'fontSize' : '14px',
'padding-left' : '100px',
'display': 'inline-block'})
])
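# The callback below re-renders the figure whenever the slider moves: the two
# slider handles select a window of session dates and the market-profile chart
# is rebuilt for just that window.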
@app.callback(Output('beinghorizontal', 'figure'),
[Input('slider', 'value')])
def update_figure(value):
dfresample = dfhist[(dfhist.datetime > dates[value[0]]) & (dfhist.datetime < dates[value[1]])]
DFList = [group[1] for group in dfresample.groupby(dfresample.index.date)]
# !!! for context based bubbles at the top with text hovers
dfcontext = get_context(dfresample, freq=freq, ticksize=ticksz, style=mode, session_hr=trading_hr)
# get market profile DataFrame and ranking as a series for each day.
# @todo: IN next version, display the ranking DataFrame with drop-down menu
dfmp_list = dfcontext[0]
df_distribution = dfcontext[1]
df_ranking = get_dayrank(df_distribution.copy(), mean_val)
ranking = df_ranking[0]
power1 = ranking.power1 # Non-normalised IB strength
power = ranking.power # Normalised IB strength for dynamic shape size for markers at bottom
breakdown = df_ranking[1]
dh_list = ranking.highd
dl_list = ranking.lowd
# IB is 1st 1 hour of the session. Not useful for scrips with global 24 x 7 session
context_ibdf = get_ibrank(mean_val, ranking)
ibpower1 = context_ibdf[0].ibpower1 # Non-normalised IB strength
ibpower = context_ibdf[0].IB_power # Normalised IB strength for dynamic shape size for markers at bottom
ibbreakdown = context_ibdf[1]
ib_high_list = context_ibdf[0].ibh
ib_low_list = context_ibdf[0].ibl
fig = go.Figure(data=[go.Candlestick(x=dfresample['datetime'],
open=dfresample['Open'],
high=dfresample['High'],
low=dfresample['Low'],
close=dfresample['Close'],
showlegend=True,
name=symbol, opacity=0.3)]) # To make candlesticks more prominent increase the opacity
for i in range(len(dfmp_list)): # test the loop with i=1
df1 = DFList[i].copy()
df_mp = dfmp_list[i]
irank = ranking.iloc[i] # select single row from ranking df
df_mp['i_date'] = irank.date
# # @todo: background color for text
df_mp['color'] = np.where(np.logical_and(
df_mp['close'] > irank.vallist, df_mp['close'] < irank.vahlist), 'green', 'white')
df_mp = df_mp.set_index('i_date', inplace=False)
fig.add_trace(go.Scattergl(x=df_mp.index, y=df_mp.close, mode="text", name=str(df_mp.index[0]), text=df_mp.alphabets,
showlegend=False, textposition="top right", textfont=dict(family="verdana", size=6, color=df_mp.color)))
if power1[i] < 0:
my_rgb = 'rgba({power}, 3, 252, 0.5)'.format(power=abs(165))
else:
my_rgb = 'rgba(23, {power}, 3, 0.5)'.format(power=abs(252))
brk_f_list_maj = []
f = 0
for f in range(len(breakdown.columns)):
brk_f_list_min=[]
for index, rows in breakdown.iterrows():
brk_f_list_min.append(index+str(': ')+str(rows[f])+'<br />')
brk_f_list_maj.append(brk_f_list_min)
breakdown_values ='' # for bubbles
for st in brk_f_list_maj[i]:
breakdown_values += st
# .........................
ibrk_f_list_maj = []
g = 0
for g in range(len(ibbreakdown.columns)):
ibrk_f_list_min=[]
for index, rows in ibbreakdown.iterrows():
ibrk_f_list_min.append(index+str(': ')+str(rows[g])+'<br />')
ibrk_f_list_maj.append(ibrk_f_list_min)
ibreakdown_values = '' # for squares
for ist in ibrk_f_list_maj[i]:
ibreakdown_values += ist
# ..................................
fig.add_trace(go.Scattergl(
# x=[df1.iloc[4]['datetime']],
x=[irank.date],
y=[dfresample['High'].max()],
mode="markers",
marker=dict(color=my_rgb, size=0.90*power[i],
line=dict(color='rgb(17, 17, 17)', width=2)),
# marker_symbol='square',
hovertext=['<br />Insights:<br />VAH: {}<br /> POC: {}<br /> VAL: {}<br /> Balance Target: {}<br /> Day Type: {}<br />strength: {}<br />BreakDown: {}<br />{}<br />{}'.format(irank.vahlist,
irank.poclist, irank.vallist,irank.btlist, irank.daytype, irank.power,'','-------------------',breakdown_values)], showlegend=False))
if ibpower1[i] < 0:
ib_rgb = 'rgba(165, 3, 252, 0.5)'
else:
ib_rgb = 'rgba(23, 252, 3, 0.5)'
fig.add_trace(go.Scattergl(
# x=[df1.iloc[4]['datetime']],
x=[irank.date],
y=[dfresample['Low'].min()],
mode="markers",
marker=dict(color=ib_rgb, size=0.40 * ibpower[i], line=dict(color='rgb(17, 17, 17)', width=2)),
marker_symbol='square',
hovertext=['<br />Insights:<br />Vol_mean: {}<br /> Vol_Daily: {}<br /> RF_mean: {}<br /> RF_daily: {}<br /> IBvol_mean: {}<br /> IBvol_day: {}<br /> IB_RFmean: {}<br /> IB_RFday: {}<br />strength: {}<br />BreakDown: {}<br />{}<br />{}'.format(mean_val['volume_mean'],irank.volumed, mean_val['rf_mean'],irank.rfd,
mean_val['volib_mean'], irank.ibvol, mean_val['ibrf_mean'],irank.ibrf, ibpower[i],'','......................',ibreakdown_values)],showlegend=False))
lvns = irank.lvnlist
for lvn in lvns:
if lvn > irank.vallist and lvn < irank.vahlist:
fig.add_shape(
# Line Horizontal
type="line",
x0=df1.iloc[0]['datetime'],
y0=lvn,
x1=df1.iloc[5]['datetime'],
y1=lvn,
line=dict(
color="darksalmon",
width=2,
dash="dashdot",),)
fig.add_shape(
# Line Horizontal
type="line",
x0=df1.iloc[0]['datetime'],
y0=ib_low_list[i],
x1=df1.iloc[0]['datetime'],
y1=ib_high_list[i],
line=dict(
color="cyan",
width=3,
),)
# day high and low
fig.add_shape(
# Line Horizontal
type="line",
x0=df1.iloc[0]['datetime'],
y0=dl_list[i],
x1=df1.iloc[0]['datetime'],
y1=dh_list[i],
line=dict(
color="gray",
width=1,
dash="dashdot",),)
ltp = dfresample.iloc[-1]['Close']
if ltp >= irank.poclist:
ltp_color = 'green'
else:
ltp_color = 'red'
fig.add_trace(go.Scatter(
x=[df1.iloc[-1]['datetime']],
y=[df1.iloc[-1]['Close']],
mode="text",
name="last traded price",
text=['last '+str(df1.iloc[-1]['Close'])],
textposition="bottom right",
textfont=dict(size=11, color=ltp_color),
showlegend=False
))
fig.layout.xaxis.color = 'white'
fig.layout.yaxis.color = 'white'
fig.layout.autosize = True
fig["layout"]["height"] = 550
fig.update_xaxes(title_text='Time', title_font=dict(size=18, color='white'),
tickangle=45, tickfont=dict(size=8, color='white'), showgrid=False, dtick=len(dfmp_list))
fig.update_yaxes(title_text=symbol, title_font=dict(size=18, color='white'),
tickfont=dict(size=12, color='white'), showgrid=False)
fig.layout.update(template="plotly_dark", title="@"+abc()[1], autosize=True,
xaxis=dict(showline=True, color='white'), yaxis=dict(showline=True, color='white',autorange= True,fixedrange=False))
fig["layout"]["xaxis"]["rangeslider"]["visible"] = False
fig["layout"]["xaxis"]["tickformat"] = "%H:%M:%S"
return fig
if __name__ == '__main__':
app.run_server(debug = False)
| 41.597744 | 335 | 0.522639 |
cfb8f8a41cd636d386a81b87316ea31f9f5bc9aa | 2,982 | py | Python | bear/utils/testing.py | tgsmith61591/bear | 153fc6e8cb01427958a949eab0a270110d8044e1 | [
"MIT"
] | 1 | 2018-10-31T01:56:18.000Z | 2018-10-31T01:56:18.000Z | bear/utils/testing.py | tgsmith61591/bear | 153fc6e8cb01427958a949eab0a270110d8044e1 | [
"MIT"
] | 1 | 2018-10-24T18:29:14.000Z | 2018-10-24T18:29:14.000Z | bear/utils/testing.py | tgsmith61591/bear | 153fc6e8cb01427958a949eab0a270110d8044e1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Testing utilities
from __future__ import absolute_import
import os
import six
from os.path import join
import shutil
from functools import wraps
__all__ = ['make_and_cleanup_project_path',
'with_temporary_file',
'SimulatedNamespace']
def make_and_cleanup_project_path(project_path, *subdirectories):
r"""Manage the project-level directory for tests.
Create the project-level directory before running the test,
and then clean it up after the test. This is all managed within a
try/finally so that the tests don't have to handle the pattern.
Parameters
----------
project_path : str or unicode
The project-level directory for testing. This should not exist prior
to the function call.
*subdirectories : varargs
The subdirectories to be created under ``project_path``.
Notes
-----
Every file created should land inside of the ``project_path`` to ensure
artifacts are properly cleaned up.
"""
def func_wrapper(func):
@wraps(func)
def test_wrapper(*args, **kwargs):
assert not os.path.exists(project_path)
subdirs = [join(project_path, s) for s in subdirectories]
for subdir in subdirs:
assert not os.path.exists(subdir)
try:
os.mkdir(project_path)
for subdir in subdirs:
os.mkdir(subdir)
func(*args, **kwargs)
finally:
# Always remove the project path to make sure it's gone if we
# failed somewhere along the way
shutil.rmtree(project_path)
assert not os.path.exists(project_path)
return test_wrapper
return func_wrapper
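# Illustrative usage (added comment, not part of the original module; the test
# name and directory names below are hypothetical):
#
#   @make_and_cleanup_project_path('test_project', 'data', 'logs')
#   def test_writes_artifacts():
#       with open(join('test_project', 'data', 'out.txt'), 'w') as f:
#           f.write('hello')
#
# The decorator creates 'test_project/', 'test_project/data/' and
# 'test_project/logs/' before the test runs and removes the whole tree afterwards.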
# Helper function to write yaml files
def _write_file(config_file, content):
with open(config_file, 'w') as yml:
yml.write(content)
def with_temporary_file(config_file, content):
"""Write and destroy a temporary file for configuration tests.
Used to decorate a test that depends on a temporary (typically a YAML)
file. Asserts the file does not exist, writes the file, executes the test
and then destroys the file.
Parameters
----------
config_file : str or unicode
The path where the configuration file should be written.
content : str or unicode
The content of the YAML file
"""
def func_wrapper(func):
def actual_wrapper(*args, **kwargs):
assert not os.path.exists(config_file)
try:
_write_file(config_file, content)
func(*args, **kwargs)
finally:
os.unlink(config_file)
return actual_wrapper
return func_wrapper
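# Illustrative usage (added comment, not part of the original module; the file
# name and YAML content below are hypothetical):
#
#   @with_temporary_file('bear.yml', 'project:\n  name: demo\n')
#   def test_reads_config():
#       assert os.path.exists('bear.yml')
#
# The file is written just before the test body runs and unlinked in the
# finally block, even if the test raises.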
class SimulatedNamespace(object):
# Simulate an Arguments Namespace object
def __init__(self, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
| 29.235294 | 77 | 0.63548 |
4b6a8a83bbd6dbd5fb7489c182bd42c7b78e969c | 739 | py | Python | Lectures_Codes/examples-11/cdecorators.py | MichalKyjovsky/NPRG065_Programing_in_Python | 14436fbf8f0e547ab084083135a84c8ae49e083c | [
"MIT"
] | null | null | null | Lectures_Codes/examples-11/cdecorators.py | MichalKyjovsky/NPRG065_Programing_in_Python | 14436fbf8f0e547ab084083135a84c8ae49e083c | [
"MIT"
] | null | null | null | Lectures_Codes/examples-11/cdecorators.py | MichalKyjovsky/NPRG065_Programing_in_Python | 14436fbf8f0e547ab084083135a84c8ae49e083c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
def prefix(class_):
class_.log_prefix = 'DEBUG'
return class_
@prefix
class Foo:
def __init__(self):
print(f'{self.log_prefix}: instance created')
foo = Foo()
# We can also add methods via the decorator
def bar(class_):
def bar(self):
print(f'{self.__class__.__name__}({self})')
class_.bar = bar
return class_
@bar
class Bar:
def __init__(self):
self.a = 1
b = Bar()
b.bar()
# But there are limitations
# We cannot override the added method as it has been added AFTER the class definition
@bar
class AnotherBar:
def bar(self):
print('overriding bar')
ab = AnotherBar()
ab.bar() # uses bar() from the decorator
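# Added illustration (not part of the original lecture file): one way around the
# limitation above is to attach the method only when the class does not already
# define its own.
def bar_if_missing(class_):
    def bar(self):
        print(f'{self.__class__.__name__}({self})')
    if 'bar' not in class_.__dict__:
        class_.bar = bar
    return class_
@bar_if_missing
class YetAnotherBar:
    def bar(self):
        print('overriding bar')
YetAnotherBar().bar()  # prints 'overriding bar' because the class kept its own method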
| 15.395833 | 85 | 0.638701 |
b17dea1c2a6e3f16e7a4137562bf28a44ef93c41 | 9,137 | py | Python | icesat2_toolkit/read_ICESat2_ATL07.py | outlk/read-ICESat-2 | 4a1e90038548a050b4bdbcbcf9e4fb7864a52b9f | [
"MIT"
] | 50 | 2019-07-22T14:13:28.000Z | 2022-03-16T19:18:07.000Z | icesat2_toolkit/read_ICESat2_ATL07.py | outlk/read-ICESat-2 | 4a1e90038548a050b4bdbcbcf9e4fb7864a52b9f | [
"MIT"
] | 2 | 2020-08-16T06:52:24.000Z | 2021-07-12T23:05:07.000Z | icesat2_toolkit/read_ICESat2_ATL07.py | outlk/read-ICESat-2 | 4a1e90038548a050b4bdbcbcf9e4fb7864a52b9f | [
"MIT"
] | 19 | 2019-07-01T03:01:01.000Z | 2022-02-25T00:29:44.000Z | #!/usr/bin/env python
u"""
read_ICESat2_ATL07.py (02/2021)
Read ICESat-2 ATL07 (Sea Ice Height) data files
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
    h5py: Python interface for Hierarchical Data Format 5 (HDF5)
https://www.h5py.org/
UPDATE HISTORY:
Updated 10/2021: using python logging for handling verbose output
Updated 02/2021: add check if input streaming from bytes
Updated 10/2020: add small function to find valid beam groups
Updated 07/2020: added function docstrings
Updated 11/2019: create attribute dictionaries but don't fill if False
Written 11/2019
"""
from __future__ import print_function
import os
import io
import re
import h5py
import logging
import numpy as np
#-- PURPOSE: read ICESat-2 ATL07 HDF5 data files
def read_HDF5_ATL07(FILENAME, ATTRIBUTES=False, **kwargs):
"""
Reads ICESat-2 ATL07 (Sea Ice Height) data files
Arguments
---------
FILENAME: full path to ATL07 file
Keyword arguments
-----------------
ATTRIBUTES: read HDF5 attributes for groups and variables
Returns
-------
IS2_atl07_mds: dictionary with ATL07 variables
IS2_atl07_attrs: dictionary with ATL07 attributes
IS2_atl07_beams: list with valid ICESat-2 beams within ATL07 file
"""
#-- Open the HDF5 file for reading
if isinstance(FILENAME, io.IOBase):
fileID = h5py.File(FILENAME, 'r')
else:
fileID = h5py.File(os.path.expanduser(FILENAME), 'r')
#-- Output HDF5 file information
logging.info(fileID.filename)
logging.info(list(fileID.keys()))
#-- allocate python dictionaries for ICESat-2 ATL07 variables and attributes
IS2_atl07_mds = {}
IS2_atl07_attrs = {}
#-- read each input beam within the file
IS2_atl07_beams = []
for gtx in [k for k in fileID.keys() if bool(re.match(r'gt\d[lr]',k))]:
#-- check if subsetted beam contains sea ice data
try:
fileID[gtx]['sea_ice_segments']['height_segment_id']
except KeyError:
pass
else:
IS2_atl07_beams.append(gtx)
#-- read each input beam within the file
for gtx in IS2_atl07_beams:
IS2_atl07_mds[gtx] = {}
IS2_atl07_mds[gtx]['sea_ice_segments'] = {}
IS2_atl07_mds[gtx]['sea_ice_segments']['geolocation'] = {}
IS2_atl07_mds[gtx]['sea_ice_segments']['geophysical'] = {}
IS2_atl07_mds[gtx]['sea_ice_segments']['heights'] = {}
IS2_atl07_mds[gtx]['sea_ice_segments']['stats'] = {}
#-- get each HDF5 variable
#-- ICESat-2 sea_ice_segments Group
for key,val in fileID[gtx]['sea_ice_segments'].items():
if isinstance(val, h5py.Dataset):
IS2_atl07_mds[gtx]['sea_ice_segments'][key] = val[:]
elif isinstance(val, h5py.Group):
for k,v in val.items():
IS2_atl07_mds[gtx]['sea_ice_segments'][key][k] = v[:]
#-- Getting attributes of included variables
if ATTRIBUTES:
#-- Getting attributes of ICESat-2 ATL07 beam variables
IS2_atl07_attrs[gtx] = {}
IS2_atl07_attrs[gtx]['sea_ice_segments'] = {}
IS2_atl07_attrs[gtx]['sea_ice_segments']['geolocation'] = {}
IS2_atl07_attrs[gtx]['sea_ice_segments']['geophysical'] = {}
IS2_atl07_attrs[gtx]['sea_ice_segments']['heights'] = {}
IS2_atl07_attrs[gtx]['sea_ice_segments']['stats'] = {}
#-- Global Group Attributes for ATL07 beam
for att_name,att_val in fileID[gtx].attrs.items():
IS2_atl07_attrs[gtx][att_name] = att_val
for key,val in fileID[gtx]['sea_ice_segments'].items():
IS2_atl07_attrs[gtx]['sea_ice_segments'][key] = {}
for att_name,att_val in val.attrs.items():
IS2_atl07_attrs[gtx]['sea_ice_segments'][key][att_name] = att_val
if isinstance(val, h5py.Group):
for k,v in val.items():
IS2_atl07_attrs[gtx]['sea_ice_segments'][key][k] = {}
for att_name,att_val in v.attrs.items():
IS2_atl07_attrs[gtx]['sea_ice_segments'][key][k][att_name] = att_val
#-- ICESat-2 orbit_info Group
IS2_atl07_mds['orbit_info'] = {}
for key,val in fileID['orbit_info'].items():
IS2_atl07_mds['orbit_info'][key] = val[:]
#-- ICESat-2 quality_assessment Group
IS2_atl07_mds['quality_assessment'] = {}
for key,val in fileID['quality_assessment'].items():
if isinstance(val, h5py.Dataset):
IS2_atl07_mds['quality_assessment'][key] = val[:]
elif isinstance(val, h5py.Group):
IS2_atl07_mds['quality_assessment'][key] = {}
for k,v in val.items():
IS2_atl07_mds['quality_assessment'][key][k] = v[:]
#-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
#-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01:T00:00:00Z UTC)
#-- Add this value to delta time parameters to compute full gps_seconds
#-- could alternatively use the Julian day of the ATLAS SDP epoch: 2458119.5
#-- and add leap seconds since 2018-01-01:T00:00:00Z UTC (ATLAS SDP epoch)
IS2_atl07_mds['ancillary_data'] = {}
IS2_atl07_attrs['ancillary_data'] = {}
for key in ['atlas_sdp_gps_epoch']:
#-- get each HDF5 variable
IS2_atl07_mds['ancillary_data'][key] = fileID['ancillary_data'][key][:]
#-- Getting attributes of group and included variables
if ATTRIBUTES:
#-- Variable Attributes
IS2_atl07_attrs['ancillary_data'][key] = {}
for att_name,att_val in fileID['ancillary_data'][key].attrs.items():
IS2_atl07_attrs['ancillary_data'][key][att_name] = att_val
#-- sea ice ancillary information (processing flags and parameters)
for cal in ('fine_surface_finding','sea_ice','surface_classification'):
IS2_atl07_mds['ancillary_data'][cal] = {}
IS2_atl07_attrs['ancillary_data'][cal] = {}
for key,val in fileID['ancillary_data'][cal].items():
#-- get each HDF5 variable
IS2_atl07_mds['ancillary_data'][cal][key] = val[:]
#-- Getting attributes of group and included variables
if ATTRIBUTES:
#-- Variable Attributes
IS2_atl07_attrs['ancillary_data'][cal][key] = {}
for att_name,att_val in val.attrs.items():
IS2_atl07_attrs['ancillary_data'][cal][key][att_name] = att_val
#-- get each global attribute and the attributes for orbit and quality
if ATTRIBUTES:
#-- ICESat-2 HDF5 global attributes
for att_name,att_val in fileID.attrs.items():
            IS2_atl07_attrs[att_name] = att_val
#-- ICESat-2 orbit_info Group
IS2_atl07_attrs['orbit_info'] = {}
for key,val in fileID['orbit_info'].items():
IS2_atl07_attrs['orbit_info'][key] = {}
for att_name,att_val in val.attrs.items():
IS2_atl07_attrs['orbit_info'][key][att_name]= att_val
#-- ICESat-2 quality_assessment Group
IS2_atl07_attrs['quality_assessment'] = {}
for key,val in fileID['quality_assessment'].items():
IS2_atl07_attrs['quality_assessment'][key] = {}
for att_name,att_val in val.attrs.items():
IS2_atl07_attrs['quality_assessment'][key][att_name]= att_val
if isinstance(val, h5py.Group):
for k,v in val.items():
IS2_atl07_attrs['quality_assessment'][key][k] = {}
for att_name,att_val in v.attrs.items():
IS2_atl07_attrs['quality_assessment'][key][k][att_name]= att_val
#-- Closing the HDF5 file
fileID.close()
#-- Return the datasets and variables
return (IS2_atl07_mds,IS2_atl07_attrs,IS2_atl07_beams)
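#-- Illustrative usage (added comment, not from the original module; the file
#-- name below is hypothetical):
#--     mds, attrs, beams = read_HDF5_ATL07('ATL07_example.h5', ATTRIBUTES=True)
#--     heights = mds[beams[0]]['sea_ice_segments']['heights']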
#-- PURPOSE: find valid beam groups within ICESat-2 ATL07 HDF5 data files
def find_HDF5_ATL07_beams(FILENAME):
"""
Find valid beam groups within ICESat-2 ATL07 (Sea Ice Height) data files
Arguments
---------
FILENAME: full path to ATL07 file
Returns
-------
IS2_atl07_beams: list with valid ICESat-2 beams within ATL07 file
"""
#-- Open the HDF5 file for reading
if isinstance(FILENAME, io.IOBase):
fileID = h5py.File(FILENAME, 'r')
else:
fileID = h5py.File(os.path.expanduser(FILENAME), 'r')
#-- output list of beams
IS2_atl07_beams = []
#-- read each input beam within the file
for gtx in [k for k in fileID.keys() if bool(re.match(r'gt\d[lr]',k))]:
#-- check if subsetted beam contains sea ice data
try:
fileID[gtx]['sea_ice_segments']['height_segment_id']
except KeyError:
pass
else:
IS2_atl07_beams.append(gtx)
#-- Closing the HDF5 file
fileID.close()
#-- return the list of beams
    return IS2_atl07_beams
 | 41.912844 | 96 | 0.625917 |
e04d38da9d3cdc589b8d337923b8dcf993494b32 | 548 | py | Python | catalog/config/urls.py | eduleones/catalog-backend-example | 8938662d391daad94e73152f7291e800a360e689 | [
"MIT"
] | null | null | null | catalog/config/urls.py | eduleones/catalog-backend-example | 8938662d391daad94e73152f7291e800a360e689 | [
"MIT"
] | 6 | 2019-12-04T23:52:51.000Z | 2022-02-10T12:30:40.000Z | catalog/config/urls.py | eduleones/catalog-backend-example | 8938662d391daad94e73152f7291e800a360e689 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from drf.swagger_utils import get_swagger_view
docs_view = get_swagger_view(title='Catalog')
urlpatterns = [
path('admin/', admin.site.urls),
path('docs/', docs_view),
path(
'shoes/',
include(('shoes.urls', 'shoes'), namespace='shoes')
),
]
if settings.DEBUG:
urlpatterns += static(
settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT
)
| 21.076923 | 59 | 0.686131 |
3910ee48f2f707a31c6dd302887668cf838acf9f | 1,401 | py | Python | part03-e15_inverse_series/test/test_inverse_series.py | alekshiidenhovi/Helsinki-University-Data-Analysis-with-Python | bc27fa585d22d630a38312ee7c4b2173d5b80d12 | [
"MIT"
] | null | null | null | part03-e15_inverse_series/test/test_inverse_series.py | alekshiidenhovi/Helsinki-University-Data-Analysis-with-Python | bc27fa585d22d630a38312ee7c4b2173d5b80d12 | [
"MIT"
] | null | null | null | part03-e15_inverse_series/test/test_inverse_series.py | alekshiidenhovi/Helsinki-University-Data-Analysis-with-Python | bc27fa585d22d630a38312ee7c4b2173d5b80d12 | [
"MIT"
] | 2 | 2022-02-14T20:07:29.000Z | 2022-03-11T07:30:23.000Z | #!/usr/bin/env python3
import unittest
from unittest.mock import patch
import pandas as pd
import numpy as np
from tmc import points
from tmc.utils import load, get_stdout, patch_helper
module_name="src.inverse_series"
inverse_series = load(module_name, "inverse_series")
main = load(module_name, "main")
ph = patch_helper(module_name)
@points('p03-15.1')
class InverseSeries(unittest.TestCase):
def test_first(self):
L=[1,2,3,1]
ind=list("abcd")
s = pd.Series(L, index=ind)
t = inverse_series(s)
np.testing.assert_array_equal(t.values, ind, err_msg="Values were incorrect!")
np.testing.assert_array_equal(t.index, L, err_msg="Index was incorrect!")
def test_second(self):
L=list("efgh")
ind=list("abcd")
s = pd.Series(L, index=ind)
t = inverse_series(s)
np.testing.assert_array_equal(t.values, ind, err_msg="Values were incorrect!")
np.testing.assert_array_equal(t.index, L, err_msg="Index was incorrect!")
def test_empty(self):
s = pd.Series()
t = inverse_series(s)
        self.assertEqual(len(t), 0, msg="Inverse of an empty Series should have length zero!")
def test_called(self):
        with patch(ph("inverse_series"), wraps=inverse_series) as pis:
main()
pis.assert_called()
if __name__ == '__main__':
unittest.main()
| 28.02 | 89 | 0.654532 |
d6c83d0c8ed13c37d7f33c9b9f2386797fdbe9f2 | 31,962 | py | Python | autopandas_v2/tests/io_featurizer_old.py | chyanju/autopandas | 16080ad12f0e8e7b0a614671aea1ed57b3fed7fe | [
"BSD-3-Clause"
] | 16 | 2019-08-13T02:49:44.000Z | 2022-02-08T03:14:34.000Z | autopandas_v2/tests/io_featurizer_old.py | chyanju/autopandas | 16080ad12f0e8e7b0a614671aea1ed57b3fed7fe | [
"BSD-3-Clause"
] | 2 | 2020-09-25T22:40:40.000Z | 2022-02-09T23:42:53.000Z | autopandas_v2/tests/io_featurizer_old.py | chyanju/autopandas | 16080ad12f0e8e7b0a614671aea1ed57b3fed7fe | [
"BSD-3-Clause"
] | 3 | 2021-07-06T10:30:36.000Z | 2022-01-11T23:21:31.000Z | import unittest
import pandas as pd
import numpy as np
from autopandas_v2.ml.featurization.io_featurizer_old import RelationGraphNode, RelationGraph, RelationGraphEdge, \
RelationGraphEdgeType, RelationGraphNodeType, RelationGraphOptions
get_node_type = RelationGraphNodeType.get_node_type
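# Added descriptive note: the tests below build relation graphs from small
# (input, output) pandas examples and check that the expected adjacency,
# equality and indexing edges (and node counts) are present in the graph.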
class TestRelationGraphFeaturizer(unittest.TestCase):
def test_basic_max(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = RelationGraphNode("I0", (0, 0), get_node_type(input_df.iat[0, 0]))
input_01 = RelationGraphNode("I0", (0, 1), get_node_type(input_df.iat[0, 1]))
input_10 = RelationGraphNode("I0", (1, 0), get_node_type(input_df.iat[1, 0]))
input_11 = RelationGraphNode("I0", (1, 1), get_node_type(input_df.iat[1, 1]))
input_20 = RelationGraphNode("I0", (2, 0), get_node_type(input_df.iat[2, 0]))
input_21 = RelationGraphNode("I0", (2, 1), get_node_type(input_df.iat[2, 1]))
output_df = pd.DataFrame([[2, 3]])
output_00 = RelationGraphNode("O0", (0, 0), get_node_type(output_df.iat[0, 0]))
output_01 = RelationGraphNode("O0", (0, 1), get_node_type(output_df.iat[0, 1]))
options = RelationGraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_df], output_df, options)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
RelationGraphEdge(input_00, input_01, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_00, input_10, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_10, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_10, input_20, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_20, input_21, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_01, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_11, input_21, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_00, output_01, RelationGraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
RelationGraphEdge(input_10, output_00, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_20, output_00, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_01, output_00, RelationGraphEdgeType.EQUALITY), # redundant
RelationGraphEdge(input_11, output_01, RelationGraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_max_series(self):
input_df = pd.DataFrame([[1, 2], [2, 3], [2, 0]])
input_00 = RelationGraphNode("I0", (0, 0), get_node_type(input_df.iat[0, 0]))
input_01 = RelationGraphNode("I0", (0, 1), get_node_type(input_df.iat[0, 1]))
input_10 = RelationGraphNode("I0", (1, 0), get_node_type(input_df.iat[1, 0]))
input_11 = RelationGraphNode("I0", (1, 1), get_node_type(input_df.iat[1, 1]))
input_20 = RelationGraphNode("I0", (2, 0), get_node_type(input_df.iat[2, 0]))
input_21 = RelationGraphNode("I0", (2, 1), get_node_type(input_df.iat[2, 1]))
output = pd.DataFrame.max(input_df)
output_00 = RelationGraphNode("O0", (0, 0), get_node_type(output.iat[0]))
output_10 = RelationGraphNode("O0", (1, 0), get_node_type(output.iat[1]))
options = RelationGraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_df], output, options)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
RelationGraphEdge(input_00, input_01, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_00, input_10, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_10, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_10, input_20, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_20, input_21, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_01, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_11, input_21, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_00, output_10, RelationGraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# equality edges
equality_edges = [
RelationGraphEdge(input_10, output_00, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_20, output_00, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_01, output_00, RelationGraphEdgeType.EQUALITY), # redundant
RelationGraphEdge(input_11, output_10, RelationGraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_values(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = RelationGraphNode("I0", (0, 0), get_node_type(input_df.iat[0, 0]))
input_01 = RelationGraphNode("I0", (0, 1), get_node_type(input_df.iat[0, 1]))
input_10 = RelationGraphNode("I0", (1, 0), get_node_type(input_df.iat[1, 0]))
input_11 = RelationGraphNode("I0", (1, 1), get_node_type(input_df.iat[1, 1]))
output = input_df.values
output_00 = RelationGraphNode("O0", (0, 0), get_node_type(output[0, 0]))
output_01 = RelationGraphNode("O0", (0, 1), get_node_type(output[0, 1]))
output_10 = RelationGraphNode("O0", (1, 0), get_node_type(output[1, 0]))
output_11 = RelationGraphNode("O0", (1, 1), get_node_type(output[1, 1]))
options = RelationGraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_df], output, options)
rel_graph_edges = rel_graph.edges
# positional edges
positional_edges = [
RelationGraphEdge(input_00, input_01, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_00, input_10, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_10, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_01, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_00, output_01, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_00, output_10, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_10, output_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_01, output_11, RelationGraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
RelationGraphEdge(input_00, output_00, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_10, output_10, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_01, output_01, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_11, output_11, RelationGraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_dict(self):
input_df = pd.DataFrame([[1, 2], [3, 4]])
input_00 = RelationGraphNode("I0", (0, 0), get_node_type(input_df.iat[0, 0]))
input_01 = RelationGraphNode("I0", (0, 1), get_node_type(input_df.iat[0, 1]))
input_10 = RelationGraphNode("I0", (1, 0), get_node_type(input_df.iat[1, 0]))
input_11 = RelationGraphNode("I0", (1, 1), get_node_type(input_df.iat[1, 1]))
output = {"A": [1, 3], "B": [2, 4]}
output_00 = RelationGraphNode("O0", (0, 0), get_node_type(output['A'][0]))
output_01 = RelationGraphNode("O0", (0, 1), get_node_type(output['B'][0]))
output_10 = RelationGraphNode("O0", (1, 0), get_node_type(output['A'][1]))
output_11 = RelationGraphNode("O0", (1, 1), get_node_type(output['B'][1]))
options = RelationGraphOptions()
options.NODE_TYPES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_df], output, options)
rel_graph_edges = rel_graph.edges
positional_edges = [
RelationGraphEdge(input_00, input_01, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_00, input_10, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_10, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(input_01, input_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_00, output_01, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_00, output_10, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_10, output_11, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(output_01, output_11, RelationGraphEdgeType.ADJACENCY)
]
for edge in positional_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
RelationGraphEdge(input_00, output_00, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_10, output_10, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_01, output_01, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(input_11, output_11, RelationGraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_groupby_output(self):
input_df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
output = input_df.groupby("Name")
options = RelationGraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_df], output, options)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
RelationGraphNode("I0", (0, 0), RelationGraphNodeType.STR)
]
alice_nodes_out = [
RelationGraphNode("O0", (0, 0), RelationGraphNodeType.STR)
]
bob_nodes_in = [
RelationGraphNode("I0", (1, 0), RelationGraphNodeType.STR),
RelationGraphNode("I0", (4, 0), RelationGraphNodeType.STR)
]
bob_nodes_out = [
RelationGraphNode("O1", (0, 0), RelationGraphNodeType.STR),
RelationGraphNode("O1", (1, 0), RelationGraphNodeType.STR)
]
mallory_nodes_in = [
RelationGraphNode("I0", (2, 0), RelationGraphNodeType.STR),
RelationGraphNode("I0", (3, 0), RelationGraphNodeType.STR),
RelationGraphNode("I0", (5, 0), RelationGraphNodeType.STR)
]
mallory_nodes_out = [
RelationGraphNode("O2", (0, 0), RelationGraphNodeType.STR),
RelationGraphNode("O2", (1, 0), RelationGraphNodeType.STR),
RelationGraphNode("O2", (2, 0), RelationGraphNodeType.STR)
]
seattle_nodes_in = [
RelationGraphNode("I0", (0, 1), RelationGraphNodeType.STR),
RelationGraphNode("I0", (1, 1), RelationGraphNodeType.STR),
RelationGraphNode("I0", (3, 1), RelationGraphNodeType.STR),
RelationGraphNode("I0", (4, 1), RelationGraphNodeType.STR),
]
seattle_nodes_out = [
RelationGraphNode("O0", (0, 1), RelationGraphNodeType.STR),
RelationGraphNode("O1", (0, 1), RelationGraphNodeType.STR),
RelationGraphNode("O2", (1, 1), RelationGraphNodeType.STR)
]
portland_nodes_in = [
RelationGraphNode("I0", (2, 1), RelationGraphNodeType.STR),
RelationGraphNode("I0", (5, 1), RelationGraphNodeType.STR)
]
portland_nodes_out = [
RelationGraphNode("O2", (0, 1), RelationGraphNodeType.STR),
RelationGraphNode("O2", (2, 1), RelationGraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = RelationGraphEdge(in_node, out_node, RelationGraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
check_edges(portland_nodes_in, portland_nodes_out)
check_edges(seattle_nodes_in, seattle_nodes_out)
def test_groupby_input(self):
df = pd.DataFrame({
"Name": ["Alice", "Bob", "Mallory", "Mallory", "Bob", "Mallory"],
"City": ["Seattle", "Seattle", "Portland", "Seattle", "Seattle", "Portland"]})
input_ = df.groupby("Name")
output = input_.count().reset_index()
options = RelationGraphOptions()
options.NODE_TYPES = True
options.ADJACENCY_EDGES = False
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_], output, options)
rel_graph_edges = rel_graph.edges
alice_nodes_in = [
RelationGraphNode("I0", (0, 0), RelationGraphNodeType.STR)
]
alice_nodes_out = [
RelationGraphNode("O0", (0, 0), RelationGraphNodeType.STR)
]
bob_nodes_in = [
RelationGraphNode("I1", (0, 0), RelationGraphNodeType.STR),
RelationGraphNode("I1", (1, 0), RelationGraphNodeType.STR)
]
bob_nodes_out = [
RelationGraphNode("O0", (1, 0), RelationGraphNodeType.STR)
]
mallory_nodes_in = [
RelationGraphNode("I2", (0, 0), RelationGraphNodeType.STR),
RelationGraphNode("I2", (1, 0), RelationGraphNodeType.STR),
RelationGraphNode("I2", (2, 0), RelationGraphNodeType.STR)
]
mallory_nodes_out = [
RelationGraphNode("O0", (2, 0), RelationGraphNodeType.STR)
]
def check_edges(in_nodes, out_nodes):
for in_node in in_nodes:
for out_node in out_nodes:
edge = RelationGraphEdge(in_node, out_node, RelationGraphEdgeType.EQUALITY)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
check_edges(alice_nodes_in, alice_nodes_out)
check_edges(bob_nodes_in, bob_nodes_out)
check_edges(mallory_nodes_in, mallory_nodes_out)
def test_idx_multi(self):
tuples = [("bar", "one"), ("bar", "two")]
index = pd.MultiIndex.from_tuples(tuples)
data = [[0], [1]]
input_df = pd.DataFrame(data, index=index)
# 0
# bar one 0
# two 1
output_df = input_df.unstack()
# 0
# one two
# bar 0 1
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_df], output_df, options)
rel_graph_edges = rel_graph.edges
bar_in_0 = RelationGraphNode("I0", (0, -2), RelationGraphNodeType.INDEX)
bar_in_1 = RelationGraphNode("I0", (1, -2), RelationGraphNodeType.INDEX)
bar_out = RelationGraphNode("O0", (0, -1), RelationGraphNodeType.INDEX)
one_in = RelationGraphNode("I0", (0, -1), RelationGraphNodeType.INDEX)
two_in = RelationGraphNode("I0", (1, -1), RelationGraphNodeType.INDEX)
one_out = RelationGraphNode("O0", (-1, 0), RelationGraphNodeType.COLUMN)
two_out = RelationGraphNode("O0", (-1, 1), RelationGraphNodeType.COLUMN)
in_0 = RelationGraphNode("I0", (0, 0), RelationGraphNodeType.INT)
in_1 = RelationGraphNode("I0", (1, 0), RelationGraphNodeType.INT)
out_0 = RelationGraphNode("O0", (0, 0), RelationGraphNodeType.INT)
out_1 = RelationGraphNode("O0", (0, 1), RelationGraphNodeType.INT)
adjacency_edges = [
RelationGraphEdge(bar_in_0, bar_in_1, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(bar_in_0, one_in, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(bar_in_1, two_in, RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(one_in, two_in, RelationGraphEdgeType.ADJACENCY)
]
for edge in adjacency_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
indexing_edges = [
RelationGraphEdge(bar_in_0, in_0, RelationGraphEdgeType.INDEX),
RelationGraphEdge(one_in, in_0, RelationGraphEdgeType.INDEX),
RelationGraphEdge(bar_in_1, in_1, RelationGraphEdgeType.INDEX),
RelationGraphEdge(two_in, in_1, RelationGraphEdgeType.INDEX),
RelationGraphEdge(bar_out, out_0, RelationGraphEdgeType.INDEX),
RelationGraphEdge(bar_out, out_1, RelationGraphEdgeType.INDEX)
]
for edge in indexing_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
equality_edges = [
RelationGraphEdge(bar_in_0, bar_out, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(bar_in_1, bar_out, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(one_in, one_out, RelationGraphEdgeType.EQUALITY),
RelationGraphEdge(two_in, two_out, RelationGraphEdgeType.EQUALITY)
]
for edge in equality_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
def test_column_multi(self):
column_labels = [['bar', 'bar', 'baz', 'baz'], ['one', 'two', 'one', 'two']]
tuples = list(zip(*column_labels))
col_index = pd.MultiIndex.from_tuples(tuples)
data = [[0, 1, 2, 3], [4, 5, 6, 7]]
input_df = pd.DataFrame(data, columns=col_index)
# bar baz
# one two one two
# 0 0 1 2 3
# 1 4 5 6 7
output_df = input_df.stack().reset_index()
# level_0 level_1 bar baz
# 0 0 one 0 2
# 1 0 two 1 3
# 2 1 one 4 6
# 3 1 two 5 7
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([input_df], output_df, options)
rel_graph_edges = rel_graph.edges
col_nodes = [[RelationGraphNode("I0", (-2, 0), RelationGraphNodeType.COLUMN),
RelationGraphNode("I0", (-2, 1), RelationGraphNodeType.COLUMN),
RelationGraphNode("I0", (-2, 2), RelationGraphNodeType.COLUMN),
RelationGraphNode("I0", (-2, 3), RelationGraphNodeType.COLUMN)],
[RelationGraphNode("I0", (-1, 0), RelationGraphNodeType.COLUMN),
RelationGraphNode("I0", (-1, 1), RelationGraphNodeType.COLUMN),
RelationGraphNode("I0", (-1, 2), RelationGraphNodeType.COLUMN),
RelationGraphNode("I0", (-1, 3), RelationGraphNodeType.COLUMN)],
]
adjacency_edges = [
RelationGraphEdge(col_nodes[0][0], col_nodes[1][0], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[0][0], col_nodes[0][1], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[1][0], col_nodes[1][1], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[1][1], col_nodes[1][2], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[0][1], col_nodes[1][1], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[0][1], col_nodes[0][2], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[0][2], col_nodes[1][2], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[0][2], col_nodes[0][3], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[1][2], col_nodes[1][3], RelationGraphEdgeType.ADJACENCY),
RelationGraphEdge(col_nodes[0][3], col_nodes[1][3], RelationGraphEdgeType.ADJACENCY)
]
for edge in adjacency_edges:
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
# indexing edges
input_coli_elems = [
[RelationGraphNode("I0", (0, 0), RelationGraphNodeType.INT),
RelationGraphNode("I0", (1, 0), RelationGraphNodeType.INT)],
[RelationGraphNode("I0", (0, 1), RelationGraphNodeType.INT),
RelationGraphNode("I0", (1, 1), RelationGraphNodeType.INT)],
[RelationGraphNode("I0", (0, 2), RelationGraphNodeType.INT),
RelationGraphNode("I0", (1, 2), RelationGraphNodeType.INT)],
[RelationGraphNode("I0", (0, 3), RelationGraphNodeType.INT),
RelationGraphNode("I0", (1, 3), RelationGraphNodeType.INT)]
]
def check_edges(in_nodes, out_nodes, edge_type):
for in_node in in_nodes:
for out_node in out_nodes:
edge = RelationGraphEdge(in_node, out_node, edge_type)
self.assertTrue(edge in rel_graph_edges,
"Could not find edge %s in set of edges:\n%s" % (edge, rel_graph_edges))
for i in range(4):
in_nodes = [col_nodes[0][i], col_nodes[1][i]]
out_nodes = input_coli_elems[i]
check_edges(in_nodes, out_nodes, RelationGraphEdgeType.INDEX)
# equality_edges
bars = [col_nodes[0][0], col_nodes[0][1]]
bazs = [col_nodes[0][2], col_nodes[0][3]]
ones = [col_nodes[1][0], col_nodes[1][2]]
twos = [col_nodes[1][1], col_nodes[1][3]]
out_01 = RelationGraphNode("O0", (0, 1), RelationGraphNodeType.STR)
out_11 = RelationGraphNode("O0", (1, 1), RelationGraphNodeType.STR)
out_21 = RelationGraphNode("O0", (2, 1), RelationGraphNodeType.STR)
out_31 = RelationGraphNode("O0", (3, 1), RelationGraphNodeType.STR)
out_col_2 = RelationGraphNode("O0", (-1, 2), RelationGraphNodeType.COLUMN)
out_col_3 = RelationGraphNode("O0", (-1, 3), RelationGraphNodeType.COLUMN)
check_edges(bars, [out_col_2], RelationGraphEdgeType.EQUALITY)
check_edges(bazs, [out_col_3], RelationGraphEdgeType.EQUALITY)
check_edges(ones, [out_01, out_21], RelationGraphEdgeType.EQUALITY)
check_edges(twos, [out_11, out_31], RelationGraphEdgeType.EQUALITY)
def test_no_spurious_for_idx_arg(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns = ["A", "B"])
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
options.INFLUENCE_EDGES = False
rel_graph: RelationGraph = RelationGraph.build_relation_graph([df, df.columns], df, options)
index_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 4)
def test_no_spurious_for_list_arg(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns = ["A", "B"])
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([df, [1,3,4]], df, options)
index_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 4)
def test_series_has_idx_and_cols(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns = ["A", "B"])
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([df], df["A"], options)
index_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 3)
def test_groupby_has_artifacts(self):
df = pd.DataFrame([[5, 2], [2, 3], [2, 0]], columns = ["A", "B"])
output = df.groupby(by="A")
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = True
rel_graph: RelationGraph = RelationGraph.build_relation_graph([df], output, options)
index_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.INDEX]
column_type_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COLUMN]
self.assertEqual(len(index_type_nodes), 6)
self.assertEqual(len(column_type_nodes), 6)
def test_index_name_nodes(self):
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
output = df.pivot(index='foo', columns='bar', values='baz')
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.INDEX_NAME_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = False
rel_graph: RelationGraph = RelationGraph.build_relation_graph([df], output, options)
index_name_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.INDEX_NAME]
column_name_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COL_INDEX_NAME]
self.assertEqual(len(index_name_nodes), 1)
self.assertEqual(len(column_name_nodes), 1)
def test_index_name_nodes_multiindex(self):
df = pd.DataFrame([(389.0, 'fly'), (24.0, 'fly'), (80.5, 'run'), (np.nan, 'jump')],
index=pd.MultiIndex.from_tuples(
[('bird', 'falcon'), ('bird', 'parrot'), ('mammal', 'lion'),
('mammal', 'monkey')], names=['class', 'name']),
columns=pd.MultiIndex.from_tuples([('speed', 'max'), ('species', 'type')]))
df.columns.names = ['name1', 'name2']
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.INDEX_NAME_NODES = True
options.ADJACENCY_EDGES = True
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = False
rel_graph: RelationGraph = RelationGraph.build_relation_graph([df], df, options)
index_name_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.INDEX_NAME]
column_name_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COL_INDEX_NAME]
self.assertEqual(len(index_name_nodes), 4) # Both in the input and output, so x2
self.assertEqual(len(column_name_nodes), 4) # Both in the input and output, so x2
def test_index_name_equality_edges(self):
df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two', 'two'],
'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
'baz': [1, 2, 3, 4, 5, 6]})
output = df.pivot(index='foo', columns='bar', values='baz')
options = RelationGraphOptions()
options.COLUMN_NODES = True
options.INDEX_NODES = True
options.INDEX_NAME_NODES = True
options.ADJACENCY_EDGES = False
options.EQUALITY_EDGES = True
options.NODE_TYPES = True
options.INDEX_EDGES = False
rel_graph: RelationGraph = RelationGraph.build_relation_graph([df], output, options)
inp_col_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COLUMN
and node.dfindex.startswith("I")]
out_idx_name_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.INDEX_NAME
and node.dfindex.startswith("O")]
out_col_idx_name_nodes = [node for node in rel_graph.nodes if node._type == RelationGraphNodeType.COL_INDEX_NAME
and node.dfindex.startswith("O")]
def check_edge_exists(in_node: RelationGraphNode, out_node: RelationGraphNode, graph: RelationGraph):
for e in graph.edges:
if (e.node1 == in_node and e.node2 == out_node) or (e.node1 == out_node and e.node2 == in_node):
return True
return False
inp_foo_node = [i for i in inp_col_nodes if i.pos == (-1, 0)][0]
inp_bar_node = [i for i in inp_col_nodes if i.pos == (-1, 1)][0]
out_foo_node = [i for i in out_idx_name_nodes if i.pos == (-1, -1)][0]
out_bar_node = [i for i in out_col_idx_name_nodes if i.pos == (-1, -1)][0]
self.assertTrue(check_edge_exists(inp_foo_node, out_foo_node, rel_graph))
self.assertTrue(check_edge_exists(inp_bar_node, out_bar_node, rel_graph))
| 52.22549 | 120 | 0.635129 |
05362a6e56787dea98876c6541d857156e3b3def | 7,494 | py | Python | Graph/GraphAPI/app.py | jennyqujy/WikiScraper | baa0d42955662396ae631e4c32c7f47610cc06c0 | [
"Apache-2.0"
] | null | null | null | Graph/GraphAPI/app.py | jennyqujy/WikiScraper | baa0d42955662396ae631e4c32c7f47610cc06c0 | [
"Apache-2.0"
] | null | null | null | Graph/GraphAPI/app.py | jennyqujy/WikiScraper | baa0d42955662396ae631e4c32c7f47610cc06c0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
# Add the ptdraft folder path to the sys.path list
sys.path.append('/Users/Jenny/Desktop/cs242/Assignment2.0/Scraper/Graph')
import pdb
from flask import Flask, jsonify, abort, make_response, request
from flask_restful import Api, Resource, reqparse, fields, marshal
from flask_httpauth import HTTPBasicAuth
from initialize import GraphQuery
from initialize import InitGraph
app = Flask(__name__, static_url_path="")
api = Api(app)
auth = HTTPBasicAuth()
@auth.get_password
def get_password(username):
if username == 'jenny':
return 'python'
return None
@auth.error_handler
def unauthorized():
# return 403 instead of 401 to prevent browsers from displaying the default
# auth dialog
return make_response(jsonify({'message': 'Unauthorized access'}), 401)
"""
use field of film/actor to match the returning json
"""
film_field = {
'film_name': fields.String,
'film_value': fields.String,
'film_starrings': fields.List(fields.String)
}
actor_field = {
'actor_age': fields.String,
'actor_castings': fields.List(fields.String),
'actor_name': fields.String
}
"""
Film API for GET, PUT and DELETE with '/graph/api/movies/<string:film_name>'
"""
class FilmAPI(Resource):
## login with authentication
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('name', type=str, required=True, help='No film name provided', location='json')
self.reqparse.add_argument('value', type=float, required=True, location='json')
super(FilmAPI, self).__init__()
## Method for GET
## return film item with input name argument
def get(self, film_name):
if len(film_name) <= 0:
abort(400)
value = GraphQuery.getFilmValue(GraphQuery(),InitGraph.filmNodes, film_name)
starrings = GraphQuery.getActorsInFilm(GraphQuery(),InitGraph.filmNameDict,film_name)
data = {'film_name': film_name, 'film_value': value, 'film_starrings':starrings}
return make_response(jsonify({'films':marshal(data, film_field)}),200)
## Method for PUT
## return json of updated item
def put(self,film_name):
if len(film_name) <= 0:
abort(400)
value = GraphQuery.getFilmValue(GraphQuery(),InitGraph.filmNodes, film_name)
starrings = GraphQuery.getActorsInFilm(GraphQuery(),InitGraph.filmNameDict,film_name)
data = {'film_name': film_name, 'film_value': value, 'film_starrings':starrings}
args = request.get_json()
if args is not None:
for k, v in args.items():
if v is not None:
data[k] = v
return {'films':data}
## Method for DELETE
## return status_code
def delete(self,film_name):
if len(film_name) <= 0:
abort(400)
GraphQuery.removeFilm(GraphQuery(),InitGraph.filmNodes, InitGraph.filmNameDict, film_name)
response = make_response(jsonify({'result': True}),200)
return {'Status':response.status_code}
"""
Actor API for GET, PUT and DELETE with '/graph/api/actors/<string:actor_name>'
"""
class ActorAPI(Resource):
## login with authentication
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('name', type=str, location='json')
self.reqparse.add_argument('age', type=int, location='json')
super(ActorAPI, self).__init__()
## Method for GET
def get(self, actor_name):
if len(actor_name) <= 0:
abort(400)
age = GraphQuery.getActorAge(GraphQuery(),InitGraph.actorNodes, actor_name)
castings = GraphQuery.getActorCastings(GraphQuery(),InitGraph.actorNameDict,actor_name)
data = {'actor_name': actor_name, 'actor_age': age, 'actor_castings':castings}
return make_response(jsonify({'actors':marshal(data, actor_field)}),200)
## Method for PUT
def put(self,actor_name):
if len(actor_name) <= 0:
abort(400)
age = GraphQuery.getActorAge(GraphQuery(),InitGraph.actorNodes, actor_name)
castings = GraphQuery.getActorCastings(GraphQuery(),InitGraph.actorNameDict,actor_name)
data = {'actor_name': actor_name, 'actor_age': age, 'actor_castings':castings}
args = request.get_json()
if args is not None:
for k, v in args.items():
if v is not None:
data[k] = v
return {'actors':data}
## Method for DELETE
def delete(self,actor_name):
GraphQuery.removeActor(GraphQuery(),InitGraph.actorNodes, InitGraph.actorNameDict, actor_name)
response = make_response(jsonify({'result': True}),200)
return {'Status':response.status_code}
"""
Actor API for GET and POST with '/graph/api/actors'
"""
class ActorListAPI(Resource):
## login with authentication
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('name', type=str, required=True, help='No actor name provided', location='json')
self.reqparse.add_argument('age', type=int, required=True, location='json')
super(ActorListAPI, self).__init__()
## Method for GET
def get(self):
name = request.args.get('name')
if name is not None and len(name) <= 0:
abort(400)
data = GraphQuery.getOtherActors(GraphQuery(),InitGraph.actorNodes, name)
return {'actors': [act for act in data]}
## Method for POST
def post(self):
data = self.reqparse.parse_args()
if len(data['name']) <= 0:
abort(400)
GraphQuery.addActor(GraphQuery(),InitGraph.actorNodes, InitGraph.actorNameDict, data['name'], data['age'])
response = make_response(jsonify(data),200)
return {'Status':response.status_code}
"""
Film API for GET and POST with '/graph/api/movies'
"""
class FilmListAPI(Resource):
## login with authentication
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('name', type=str, required=True, help='No film name provided', location='json')
self.reqparse.add_argument('value', type=float, required=True, location='json')
super(FilmListAPI, self).__init__()
## Method for GET
def get(self):
name = request.args.get('name')
if len(name) <= 0:
abort(400)
data = GraphQuery.getOtherFilms(GraphQuery(),InitGraph.filmNodes, name)
return {'films': [film for film in data]}
## Method for POST
def post(self):
# pdb.set_trace()
data = self.reqparse.parse_args()
if len(data['name']) <= 0:
abort(400)
GraphQuery.addFilm(GraphQuery(),InitGraph.filmNodes, InitGraph.filmNameDict, data['name'], data['value'])
response = make_response(jsonify(data),200)
return {'Status':response.status_code}
"""
Add the above four resource to our api.
"""
if __name__ == '__main__':
api.add_resource(ActorAPI, '/graph/api/actors/<string:actor_name>', endpoint='actor')
api.add_resource(ActorListAPI, '/graph/api/actors', endpoint='actor_list')
api.add_resource(FilmAPI, '/graph/api/movies/<string:film_name>', endpoint='film')
api.add_resource(FilmListAPI, '/graph/api/movies', endpoint='film_list')
app.run(debug=True)
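# Illustrative requests (added comment, not part of the original file; the
# host/port assume Flask's defaults and the movie data is hypothetical):
#   curl -u jenny:python http://127.0.0.1:5000/graph/api/movies/Inception
#   curl -u jenny:python -H "Content-Type: application/json" -X POST \
#        -d '{"name": "Inception", "value": 8.8}' http://127.0.0.1:5000/graph/api/movies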
| 36.735294 | 115 | 0.662663 |
a6c8e9e273cb2e08fc789acfdf9f92cb4e70f341 | 5,282 | py | Python | fluid/object_detection/train.py | sladesha/models | 9264e8cebb0219a9cd765511b8a7c9236f0b1da8 | [
"Apache-2.0"
] | null | null | null | fluid/object_detection/train.py | sladesha/models | 9264e8cebb0219a9cd765511b8a7c9236f0b1da8 | [
"Apache-2.0"
] | null | null | null | fluid/object_detection/train.py | sladesha/models | 9264e8cebb0219a9cd765511b8a7c9236f0b1da8 | [
"Apache-2.0"
] | 1 | 2018-09-10T02:11:21.000Z | 2018-09-10T02:11:21.000Z | import paddle.v2 as paddle
import paddle.fluid as fluid
import reader
import load_model as load_model
from mobilenet_ssd import mobile_net
from utility import add_arguments, print_arguments
import os
import numpy as np
import argparse
import functools
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('parallel', bool, True, "Whether use parallel training.")
add_arg('use_gpu', bool, True, "Whether use GPU.")
# yapf: enable
def train(args,
train_file_list,
val_file_list,
data_args,
learning_rate,
batch_size,
num_passes,
model_save_dir='model',
init_model_path=None):
image_shape = [3, data_args.resize_h, data_args.resize_w]
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
gt_box = fluid.layers.data(
name='gt_box', shape=[4], dtype='float32', lod_level=1)
gt_label = fluid.layers.data(
name='gt_label', shape=[1], dtype='int32', lod_level=1)
difficult = fluid.layers.data(
name='gt_difficult', shape=[1], dtype='int32', lod_level=1)
if args.parallel:
places = fluid.layers.get_places()
pd = fluid.layers.ParallelDo(places)
with pd.do():
image_ = pd.read_input(image)
gt_box_ = pd.read_input(gt_box)
gt_label_ = pd.read_input(gt_label)
difficult_ = pd.read_input(difficult)
locs, confs, box, box_var = mobile_net(image_, image_shape)
loss = fluid.layers.ssd_loss(locs, confs, gt_box_, gt_label_,
box, box_var)
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=0.45)
loss = fluid.layers.reduce_sum(loss)
pd.write_output(loss)
pd.write_output(nmsed_out)
loss, nmsed_out = pd()
loss = fluid.layers.mean(loss)
else:
locs, confs, box, box_var = mobile_net(image, image_shape)
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=0.45)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label,
box, box_var)
loss = fluid.layers.reduce_sum(loss)
test_program = fluid.default_main_program().clone(for_test=True)
with fluid.program_guard(test_program):
map_eval = fluid.evaluator.DetectionMAP(
nmsed_out,
gt_label,
gt_box,
difficult,
21,
overlap_threshold=0.5,
evaluate_difficult=False,
ap_version='11point')
boundaries = [40000, 60000]
values = [0.001, 0.0005, 0.00025]
optimizer = fluid.optimizer.RMSProp(
learning_rate=fluid.layers.piecewise_decay(boundaries, values),
regularization=fluid.regularizer.L2Decay(0.00005), )
optimizer.minimize(loss)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
load_model.load_and_set_vars(place)
#load_model.load_paddlev1_vars(place)
train_reader = paddle.batch(
reader.train(data_args, train_file_list), batch_size=batch_size)
test_reader = paddle.batch(
reader.test(data_args, val_file_list), batch_size=batch_size)
feeder = fluid.DataFeeder(
place=place, feed_list=[image, gt_box, gt_label, difficult])
def test(pass_id):
_, accum_map = map_eval.get_map_var()
map_eval.reset(exe)
test_map = None
for _, data in enumerate(test_reader()):
test_map = exe.run(test_program,
feed=feeder.feed(data),
fetch_list=[accum_map])
print("Test {0}, map {1}".format(pass_id, test_map[0]))
for pass_id in range(num_passes):
for batch_id, data in enumerate(train_reader()):
loss_v = exe.run(fluid.default_main_program(),
feed=feeder.feed(data),
fetch_list=[loss])
if batch_id % 20 == 0:
print("Pass {0}, batch {1}, loss {2}"
.format(pass_id, batch_id, loss_v[0]))
test(pass_id)
if pass_id % 10 == 0:
model_path = os.path.join(model_save_dir, str(pass_id))
            print('save models to %s' % (model_path))
fluid.io.save_inference_model(model_path, ['image'], [nmsed_out],
exe)
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
data_args = reader.Settings(
data_dir='./data',
label_file='label_list',
apply_distort=True,
apply_expand=True,
resize_h=300,
resize_w=300,
mean_value=[127.5, 127.5, 127.5])
train(args,
train_file_list='./data/trainval.txt',
val_file_list='./data/test.txt',
data_args=data_args,
learning_rate=0.001,
batch_size=args.batch_size,
num_passes=300)
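# Illustrative invocation (script name and data layout are assumptions, not taken from this file):
#   python train.py --batch_size 32 --parallel True --use_gpu True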
| 36.427586 | 79 | 0.60602 |
e5bada9a7b33379093adbf126eac537736072724 | 2,356 | py | Python | backtest/graph_util.py | waynewan/backtest | e01ee735c373c31dc2f867ad0437ade1e29b4a22 | [
"Apache-2.0"
] | null | null | null | backtest/graph_util.py | waynewan/backtest | e01ee735c373c31dc2f867ad0437ade1e29b4a22 | [
"Apache-2.0"
] | null | null | null | backtest/graph_util.py | waynewan/backtest | e01ee735c373c31dc2f867ad0437ade1e29b4a22 | [
"Apache-2.0"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
from jackutil.containerutil import extractValue
from tqdm.auto import tqdm
# --
# -- use in jupyter-notebook env (ipython) only
# --
def plot_account_cumprofit(account,title=None):
df0 = account.to_dataframe()
df0.plot(x='exit_date',y='cumprofit',logy=True,figsize=(12,8),title=title)
def plot_multilines_by_category(df,*,category,xcol,ycol):
df0 = df.pivot(index=xcol,columns=category,values=ycol)
df0.plot()
def plot_3d_surface(title=None,data=None,ax=None):
# --
# -- use pivot(index=summary.columns[0],columns=summary.columns[1],values=summary.columns[2])
# -- for example:
# -- max_age 300 400 500 600 700 800 900
# -- maxpos
# -- 5 142.42 164.48 331.14 324.42 294.71 279.05 296.55
# -- 15 136.87 164.50 287.95 308.02 245.78 227.77 223.29
# -- 25 101.27 110.37 195.94 205.23 177.73 172.17 164.12
# -- 35 65.60 84.68 148.45 149.78 130.96 124.81 118.92
# -- 45 54.97 72.64 117.07 114.45 98.54 96.33 92.28
# --
X,Y = np.meshgrid(data.index,data.columns)
Z = data.T.to_numpy()
ax.plot_surface(X=X,Y=Y,Z=Z,cmap='gray',edgecolor='none',alpha=0.5)
ax.contour(X=X,Y=Y,Z=Z,zdir='x',cmap='Reds',offset=0)
ax.contour(X=X,Y=Y,Z=Z,zdir='y',cmap='Greens',offset=np.max(Y))
ax.set_title(title)
ax.set_xlabel(data.index.name)
ax.set_ylabel(data.columns.name)
def plot_3d_surfaces(pivots,figsize=(40,40),arragement=33):
fig = plt.figure(figsize=figsize)
plt_id = 10 * max(11,min(99, arragement))
for k,v in pivots.items():
plt_id += 1
plot_3d_surface(title=str(k),data=v,ax=fig.add_subplot(plt_id, projection='3d'))
def create_pivots_by_category(data,*,index,columns,values,categoryColumns=None):
if(categoryColumns is None):
categoryColumns = list( data.columns )
categoryColumns.pop(categoryColumns.index(index))
categoryColumns.pop(categoryColumns.index(columns))
categoryColumns.pop(categoryColumns.index(values))
categories = data.loc[:,categoryColumns].drop_duplicates()
cat_to_pivots = {}
for ii in range(0,len(categories)):
ii_category = categories.iloc[ii]
df = (data.loc[:,categoryColumns]==ii_category).all(axis=1)
df = data[df].loc[:,(index,columns,values)]
df = df.pivot(index=index,columns=columns,values=values)
cat_to_pivots[ii_category.to_json()] = df
return cat_to_pivots
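# Usage sketch (hypothetical `summary` DataFrame with columns maxpos, max_age, profit plus category columns):
#   pivots = create_pivots_by_category(summary, index='maxpos', columns='max_age', values='profit')
#   plot_3d_surfaces(pivots, figsize=(20, 20))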
| 38.622951 | 94 | 0.702886 |
e016bc80f0e8a23635bfb107f3239ee607679a06 | 433 | py | Python | py/_std.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2017-07-05T09:57:33.000Z | 2017-11-14T23:05:53.000Z | py/_std.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2019-03-23T08:23:21.000Z | 2019-03-23T08:23:21.000Z | py/_std.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2017-11-07T18:05:19.000Z | 2017-11-14T18:06:55.000Z | import sys
class Std(object):
""" makes top-level python modules available as an attribute,
importing them on first access.
"""
def __init__(self):
self.__dict__ = sys.modules
def __getattr__(self, name):
try:
m = __import__(name)
except ImportError:
raise AttributeError("py.std: could not import %s" % name)
return m
std = Std()
| 22.789474 | 71 | 0.570439 |
6a0ea10cf6314b4626ff7b9431d22bddebde78c1 | 2,436 | py | Python | flexget/components/pending_approval/db.py | guillaumelamirand/Flexget | 51945105c253a64c079eb5a62680392ce9013794 | [
"MIT"
] | 2 | 2017-03-25T10:39:25.000Z | 2019-12-11T03:46:26.000Z | flexget/components/pending_approval/db.py | guillaumelamirand/Flexget | 51945105c253a64c079eb5a62680392ce9013794 | [
"MIT"
] | null | null | null | flexget/components/pending_approval/db.py | guillaumelamirand/Flexget | 51945105c253a64c079eb5a62680392ce9013794 | [
"MIT"
] | null | null | null | import logging
from datetime import datetime, timedelta
from sqlalchemy import Boolean, Column, DateTime, Integer, String, Unicode
from flexget import db_schema
from flexget.event import event
from flexget.utils.database import entry_synonym
log = logging.getLogger('pending_approval')
Base = db_schema.versioned_base('pending_approval', 0)
class PendingEntry(Base):
__tablename__ = 'pending_entries'
id = Column(Integer, primary_key=True, autoincrement=True, nullable=False)
task_name = Column(Unicode)
title = Column(Unicode)
url = Column(String)
approved = Column(Boolean)
_json = Column('json', Unicode)
entry = entry_synonym('_json')
added = Column(DateTime, default=datetime.now)
def __init__(self, task_name, entry):
self.task_name = task_name
self.title = entry['title']
self.url = entry['url']
self.approved = False
self.entry = entry
def __repr__(self):
return '<PendingEntry(task_name={},title={},url={},approved={})>'.format(
self.task_name, self.title, self.url, self.approved
)
def to_dict(self):
return {
'id': self.id,
'task_name': self.task_name,
'title': self.title,
'url': self.url,
'approved': self.approved,
'added': self.added,
}
@event('manager.db_cleanup')
def db_cleanup(manager, session):
# Clean unapproved entries older than 1 year
deleted = (
session.query(PendingEntry)
.filter(PendingEntry.added < datetime.now() - timedelta(days=365))
.delete()
)
if deleted:
log.info('Purged %i pending entries older than 1 year', deleted)
def list_pending_entries(
session, task_name=None, approved=None, start=None, stop=None, sort_by='added', descending=True
):
log.debug('querying pending entries')
query = session.query(PendingEntry)
if task_name:
query = query.filter(PendingEntry.task_name == task_name)
if approved is not None:
query = query.filter(PendingEntry.approved == approved)
if descending:
query = query.order_by(getattr(PendingEntry, sort_by).desc())
else:
query = query.order_by(getattr(PendingEntry, sort_by))
return query.slice(start, stop).all()
def get_entry_by_id(session, entry_id):
return session.query(PendingEntry).filter(PendingEntry.id == entry_id).one()
| 30.835443 | 99 | 0.663793 |
bc78d8c06390c284d06791776578b3e5a0c12241 | 4,206 | py | Python | socfaker/dns.py | priamai/soc-faker | 51b587f0cec52212136905280406e915006d2afc | [
"MIT"
] | 122 | 2020-02-21T16:06:54.000Z | 2022-03-21T13:53:03.000Z | socfaker/dns.py | priamai/soc-faker | 51b587f0cec52212136905280406e915006d2afc | [
"MIT"
] | 13 | 2020-01-29T16:37:05.000Z | 2022-01-27T21:30:10.000Z | socfaker/dns.py | priamai/soc-faker | 51b587f0cec52212136905280406e915006d2afc | [
"MIT"
] | 20 | 2020-04-10T11:59:29.000Z | 2022-02-10T09:20:26.000Z | import json
import os
from .network import Network
from .words import Words
from .organization import Organization
from .baseclass import BaseClass
class DNS(BaseClass):
"""The DNS class contains common information related to DNS data
Returns:
DNS: An object containing information related to DNS
"""
__TLD_DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'tld' + '.json'))
__WORDS_DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data', 'words' + '.txt'))
__WORD_LIST = Words().get()
__TLD_LIST = []
@property
def record(self):
"""A randomly selected record type
Returns:
str: A random DNS record (e.g. A, CNAME, PTR, etc.)
"""
return self.random.choice([
'A',
'AAAA',
'ALIAS',
'CNAME',
'MX',
'NS',
'PTR',
'SOA',
'SRV',
'TXT'
])
@property
def header_flags(self):
"""DNS Header flags
Returns:
str: A randomly selected DNS Header Flag
"""
return [self.random.choice([
'AA',
'TC',
'RD',
'RA',
'AD',
'CD',
'DO'
])]
@property
def id(self):
"""A random DNS ID value from 10000,100000
Returns:
int: A random DNS ID value
"""
return str(self.random.randint(10000,100000))
@property
def response_code(self):
"""A DNS Response Code
Returns:
str: A DNS response code as part of a response made during a DNS request
"""
return self.random.choice([
'NOERROR',
'NXDOMAIN',
'SERVFAIL',
'REFUSED'
])
@property
def op_code(self):
"""A DNS OP COde
Returns:
str: A random DNS OP Code for a DNS request
"""
return self.random.choice([
'QUERY',
'IQUERY',
'STATUS',
'NOTIFY',
'UPDATE',
'DSO'
])
def __get_dns_answer(self):
self._network = Network()
return {
'class': 'IN',
'data': self._network.ipv4,
'name': self.name,
'ttl': self.random.randint(10,400),
'type': self.record
}
@property
def answers(self):
"""A list of DNS answers during a DNS request
Returns:
list: A random list (count) of random DNS answers during a DNS request
"""
return_list = []
for i in range(self.random.randint(1,8)):
return_list.append(self.__get_dns_answer())
return return_list
@property
def question(self):
"""A DNS question during a DNS request
Returns:
dict: A random DNS question during a DNS request
"""
return {
'class': 'IN',
'name': Organization().name,
'registered_domain': Organization().domain,
'type': self.record
}
@property
def direction(self):
"""The direction of a DNS request
Returns:
str: Returns a direction for a DNS request or response
"""
return self.random.choice([
'inbound',
'outbound',
'internal',
'external',
'unknown'
])
@property
def name(self):
"""Returns a randomly generated DNS name
Returns:
str: A random DNS Name
"""
if not self.__TLD_LIST:
with open(self.__TLD_DATA_PATH, 'r') as tlds:
data = json.load(tlds)
for key,val in data.items():
self.__TLD_LIST.append(val)
try:
tld = self.random.choice(self.__TLD_LIST)
except:
tld = self.random.choice(self.__TLD_LIST)
return "{}.{}.{}".format(
self.random.choice(self.__WORD_LIST),
self.random.choice(self.__WORD_LIST),
tld
)
| 24.887574 | 106 | 0.497147 |
84192fc58f6d9531a1ec49d82869a1cc1ec4d962 | 2,296 | py | Python | dbstruct.py | uniurbit/choco-updater | 7e04d70be5dc022dc7460bfcb8c59aba30381235 | [
"MIT"
] | 2 | 2021-05-17T08:12:22.000Z | 2021-06-06T18:21:15.000Z | dbstruct.py | uniurbit/choco-updater | 7e04d70be5dc022dc7460bfcb8c59aba30381235 | [
"MIT"
] | null | null | null | dbstruct.py | uniurbit/choco-updater | 7e04d70be5dc022dc7460bfcb8c59aba30381235 | [
"MIT"
] | null | null | null | # Francesco Buresta @ Uniurb 20201210
# migrations
create_db = '''
CREATE DATABASE IF NOT EXISTS `choco_update` /*!40100 DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_0900_ai_ci */ /*!80016 DEFAULT ENCRYPTION='N' */;
USE `choco_update`;
'''
create_tbl_package = '''
--
-- Table structure for table `package`
--
CREATE TABLE IF NOT EXISTS `package` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`name` varchar(45) NOT NULL,
`description` varchar(255) DEFAULT NULL,
`choco_id` varchar(45) NOT NULL,
-- MySQL -- PRIMARY KEY (`id`),
UNIQUE (`choco_id`)
-- MySQL -- UNIQUE KEY `choco_id_UNIQUE` (`choco_id`)
)
-- ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
'''
create_tbl_package_update = '''
--
-- Table structure for table `pkg_update`
--
CREATE TABLE IF NOT EXISTS `pkg_update` (
`package_id` INTEGER NOT NULL,
`version` varchar(45) NOT NULL,
`update_timestamp` timestamp NULL DEFAULT NULL,
`fetch_timestamp` timestamp NOT NULL,
`status_id` int NOT NULL,
PRIMARY KEY (`package_id`,`version`),
-- MySQL -- KEY `fk_status_id_idx` (`status_id`),
FOREIGN KEY (`package_id`) REFERENCES `package` (`id`) ON DELETE CASCADE,
FOREIGN KEY (`status_id`) REFERENCES `status` (`id`)
-- MySQL -- CONSTRAINT `fk_package_id` FOREIGN KEY (`package_id`) REFERENCES `package` (`id`) ON DELETE CASCADE,
-- MySQL -- CONSTRAINT `fk_status_id` FOREIGN KEY (`status_id`) REFERENCES `status` (`id`)
)
-- ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
'''
create_tbl_status = '''
--
-- Table structure for table `status`
--
CREATE TABLE IF NOT EXISTS `status` (
`id` INTEGER PRIMARY KEY AUTOINCREMENT,
`name` varchar(45) NOT NULL
)
-- ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_0900_ai_ci;
'''
'''-- MySQL --
CREATE TABLE IF NOT EXISTS `status` (
`id` int NOT NULL AUTO_INCREMENT,
`name` varchar(45) NOT NULL,
PRIMARY KEY (`id`)
)
'''
# seeders
insert_tbl_status = '''
INSERT INTO `status` VALUES (1,'pending'),(2,'updated'),(3,'skipped'),(4,'deleted');
'''
# MySQL
'''
LOCK TABLES `status` WRITE;
/*!40000 ALTER TABLE `status` DISABLE KEYS */;
INSERT INTO `status` VALUES (1,'pending'),(2,'updated'),(3,'skipped'),(4,'deleted');
/*!40000 ALTER TABLE `status` ENABLE KEYS */;
UNLOCK TABLES;
'''
| 28 | 150 | 0.693815 |
34559226b310c476982599c3518145af0aa7d6c9 | 2,295 | py | Python | golden/models.py | Chebichii-Lab/Awwards-Show | 3cb330cd1c11c89a12dee04db2378ec98f889a87 | [
"MIT"
] | null | null | null | golden/models.py | Chebichii-Lab/Awwards-Show | 3cb330cd1c11c89a12dee04db2378ec98f889a87 | [
"MIT"
] | null | null | null | golden/models.py | Chebichii-Lab/Awwards-Show | 3cb330cd1c11c89a12dee04db2378ec98f889a87 | [
"MIT"
] | null | null | null | from django.db import models
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
# Create your models here.
class Profile(models.Model):
profile_picture = CloudinaryField('image')
profile_bio = models.TextField()
profile_contact = models.CharField(max_length=60,blank=True)
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name="profile",primary_key=True)
def __str__(self):
return self.user.username
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
class Project(models.Model):
project_title = models.CharField(max_length=60,blank=True)
project_image = CloudinaryField('image')
project_description = models.TextField()
project_link = models.URLField(blank=True)
user = models.ForeignKey(User, null=True,on_delete=models.CASCADE)
profile = models.ForeignKey(Profile,null=True,on_delete=models.CASCADE)
def __str__(self):
return self.project_title
def save_project(self):
self.save()
def delete_project(self):
self.delete()
@classmethod
def project_by_id(cls,id):
project = Project.objects.filter(id =id)
return project
@classmethod
def search_by_project_title(cls,search_term):
projects = cls.objects.filter(project_title__icontains=search_term)
return projects
class Reviews(models.Model):
REVIEW_CHOICES = (
(1, '1'),
(2, '2'),
(3, '3'),
(4, '4'),
(5, '5'),
(6, '6'),
(7, '7'),
(8, '8'),
(9, '9'),
(10, '10'),
)
design = models.IntegerField(choices=REVIEW_CHOICES,default=0,blank=False)
usability = models.IntegerField(choices=REVIEW_CHOICES,default=0,blank=False)
content = models.IntegerField(choices=REVIEW_CHOICES,default=0,blank=False)
average = models.DecimalField(default=1,blank=False,decimal_places=2,max_digits=100)
project = models.ForeignKey(Project,null=True,on_delete=models.CASCADE)
user = models.ForeignKey(User,null=True,blank=True,on_delete=models.CASCADE)
def __str__(self):
        return str(self.user)
def save_review(self):
self.save()
def delete_review(self):
self.delete()
| 27.650602 | 104 | 0.674074 |
7d5fbb8d655e27991ddce8793a2e8611bd9ec5fc | 8,895 | py | Python | test/functional/import-rescan.py | badrkarni/saudicoin | 18c221ef1eb576d52bec53dcd449d2c6766684a2 | [
"MIT"
] | 8 | 2018-01-08T09:53:42.000Z | 2018-01-17T12:42:19.000Z | test/functional/import-rescan.py | badrkarni/saudicoin | 18c221ef1eb576d52bec53dcd449d2c6766684a2 | [
"MIT"
] | null | null | null | test/functional/import-rescan.py | badrkarni/saudicoin | 18c221ef1eb576d52bec53dcd449d2c6766684a2 | [
"MIT"
] | 1 | 2020-02-26T17:40:29.000Z | 2020-02-26T17:40:29.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet import RPCs.
Test rescan behavior of importaddress, importpubkey, importprivkey, and
importmulti RPCs with different types of keys and rescan options.
In the first part of the test, node 0 creates an address for each type of
import RPC call and sends BTC to it. Then other nodes import the addresses,
and the test makes listtransactions and getbalance calls to confirm that the
importing node either did or did not execute rescans picking up the send
transactions.
In the second part of the test, node 0 sends more BTC to each address, and the
test makes more listtransactions and getbalance calls to confirm that the
importing nodes pick up the new transactions regardless of whether rescans
happened previously.
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (assert_raises_rpc_error, connect_nodes, sync_blocks, assert_equal, set_node_times)
import collections
import enum
import itertools
Call = enum.Enum("Call", "single multi")
Data = enum.Enum("Data", "address pub priv")
Rescan = enum.Enum("Rescan", "no yes late_timestamp")
class Variant(collections.namedtuple("Variant", "call data rescan prune")):
"""Helper for importing one key and verifying scanned transactions."""
def try_rpc(self, func, *args, **kwargs):
if self.expect_disabled:
assert_raises_rpc_error(-4, "Rescan is disabled in pruned mode", func, *args, **kwargs)
else:
return func(*args, **kwargs)
def do_import(self, timestamp):
"""Call one key import RPC."""
if self.call == Call.single:
if self.data == Data.address:
response = self.try_rpc(self.node.importaddress, self.address["address"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.pub:
response = self.try_rpc(self.node.importpubkey, self.address["pubkey"], self.label,
self.rescan == Rescan.yes)
elif self.data == Data.priv:
response = self.try_rpc(self.node.importprivkey, self.key, self.label, self.rescan == Rescan.yes)
assert_equal(response, None)
elif self.call == Call.multi:
response = self.node.importmulti([{
"scriptPubKey": {
"address": self.address["address"]
},
"timestamp": timestamp + TIMESTAMP_WINDOW + (1 if self.rescan == Rescan.late_timestamp else 0),
"pubkeys": [self.address["pubkey"]] if self.data == Data.pub else [],
"keys": [self.key] if self.data == Data.priv else [],
"label": self.label,
"watchonly": self.data != Data.priv
}], {"rescan": self.rescan in (Rescan.yes, Rescan.late_timestamp)})
assert_equal(response, [{"success": True}])
def check(self, txid=None, amount=None, confirmations=None):
"""Verify that getbalance/listtransactions return expected values."""
balance = self.node.getbalance(self.label, 0, True)
assert_equal(balance, self.expected_balance)
txs = self.node.listtransactions(self.label, 10000, 0, True)
assert_equal(len(txs), self.expected_txs)
if txid is not None:
tx, = [tx for tx in txs if tx["txid"] == txid]
assert_equal(tx["account"], self.label)
assert_equal(tx["address"], self.address["address"])
assert_equal(tx["amount"], amount)
assert_equal(tx["category"], "receive")
assert_equal(tx["label"], self.label)
assert_equal(tx["txid"], txid)
assert_equal(tx["confirmations"], confirmations)
assert_equal("trusted" not in tx, True)
# Verify the transaction is correctly marked watchonly depending on
# whether the transaction pays to an imported public key or
# imported private key. The test setup ensures that transaction
# inputs will not be from watchonly keys (important because
# involvesWatchonly will be true if either the transaction output
# or inputs are watchonly).
if self.data != Data.priv:
assert_equal(tx["involvesWatchonly"], True)
else:
assert_equal("involvesWatchonly" not in tx, True)
# List of Variants for each way a key or address could be imported.
IMPORT_VARIANTS = [Variant(*variants) for variants in itertools.product(Call, Data, Rescan, (False, True))]
# List of nodes to import keys to. Half the nodes will have pruning disabled,
# half will have it enabled. Different nodes will be used for imports that are
# expected to cause rescans, and imports that are not expected to cause
# rescans, in order to prevent rescans during later imports picking up
# transactions associated with earlier imports. This makes it easier to keep
# track of expected balances and transactions.
ImportNode = collections.namedtuple("ImportNode", "prune rescan")
IMPORT_NODES = [ImportNode(*fields) for fields in itertools.product((False, True), repeat=2)]
# Rescans start at the earliest block up to 2 hours before the key timestamp.
TIMESTAMP_WINDOW = 2 * 60 * 60
class ImportRescanTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2 + len(IMPORT_NODES)
def setup_network(self):
extra_args = [[] for _ in range(self.num_nodes)]
for i, import_node in enumerate(IMPORT_NODES, 2):
if import_node.prune:
extra_args[i] += ["-prune=1"]
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
for i in range(1, self.num_nodes):
connect_nodes(self.nodes[i], 0)
def run_test(self):
# Create one transaction on node 0 with a unique amount and label for
# each possible type of wallet import RPC.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.label = "label {} {}".format(i, variant)
variant.address = self.nodes[1].validateaddress(self.nodes[1].getnewaddress(variant.label))
variant.key = self.nodes[1].dumpprivkey(variant.address["address"])
variant.initial_amount = 10 - (i + 1) / 4.0
variant.initial_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.initial_amount)
# Generate a block containing the initial transactions, then another
# block further in the future (past the rescan window).
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
timestamp = self.nodes[0].getblockheader(self.nodes[0].getbestblockhash())["time"]
set_node_times(self.nodes, timestamp + TIMESTAMP_WINDOW + 1)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# For each variation of wallet key import, invoke the import RPC and
# check the results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
variant.expect_disabled = variant.rescan == Rescan.yes and variant.prune and variant.call == Call.single
expect_rescan = variant.rescan == Rescan.yes and not variant.expect_disabled
variant.node = self.nodes[2 + IMPORT_NODES.index(ImportNode(variant.prune, expect_rescan))]
variant.do_import(timestamp)
if expect_rescan:
variant.expected_balance = variant.initial_amount
variant.expected_txs = 1
variant.check(variant.initial_txid, variant.initial_amount, 2)
else:
variant.expected_balance = 0
variant.expected_txs = 0
variant.check()
# Create new transactions sending to each address.
for i, variant in enumerate(IMPORT_VARIANTS):
variant.sent_amount = 10 - (2 * i + 1) / 8.0
variant.sent_txid = self.nodes[0].sendtoaddress(variant.address["address"], variant.sent_amount)
# Generate a block containing the new transactions.
self.nodes[0].generate(1)
assert_equal(self.nodes[0].getrawmempool(), [])
sync_blocks(self.nodes)
# Check the latest results from getbalance and listtransactions.
for variant in IMPORT_VARIANTS:
if not variant.expect_disabled:
variant.expected_balance += variant.sent_amount
variant.expected_txs += 1
variant.check(variant.sent_txid, variant.sent_amount, 1)
else:
variant.check()
if __name__ == "__main__":
ImportRescanTest().main()
| 47.31383 | 116 | 0.6543 |
683f21be93621053e4b47b19abe7cc866b9921b5 | 1,493 | py | Python | google/ads/googleads/v9/enums/types/promotion_placeholder_field.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/enums/types/promotion_placeholder_field.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | google/ads/googleads/v9/enums/types/promotion_placeholder_field.py | JakobSteixner/google-ads-python | df2b802cc7e78295a4ece21cc7ef3787cd35dab0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"PromotionPlaceholderFieldEnum",},
)
class PromotionPlaceholderFieldEnum(proto.Message):
r"""Values for Promotion placeholder fields.
"""
class PromotionPlaceholderField(proto.Enum):
r"""Possible values for Promotion placeholder fields."""
UNSPECIFIED = 0
UNKNOWN = 1
PROMOTION_TARGET = 2
DISCOUNT_MODIFIER = 3
PERCENT_OFF = 4
MONEY_AMOUNT_OFF = 5
PROMOTION_CODE = 6
ORDERS_OVER_AMOUNT = 7
PROMOTION_START = 8
PROMOTION_END = 9
OCCASION = 10
FINAL_URLS = 11
FINAL_MOBILE_URLS = 12
TRACKING_URL = 13
LANGUAGE = 14
FINAL_URL_SUFFIX = 15
__all__ = tuple(sorted(__protobuf__.manifest))
| 29.27451 | 74 | 0.684528 |
5c343c5675a10e9f3670c42c9dfa3c782b76808a | 6,904 | py | Python | Lib/site-packages/wikiapi/wikiapi.py | Srinath-tr/Goferbot | 0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9 | [
"Apache-2.0",
"bzip2-1.0.6"
] | 1 | 2019-04-23T21:50:08.000Z | 2019-04-23T21:50:08.000Z | Lib/site-packages/wikiapi/wikiapi.py | Srinath-tr/Goferbot | 0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9 | [
"Apache-2.0",
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/wikiapi/wikiapi.py | Srinath-tr/Goferbot | 0f734d01c6504c6c97dbdf45f5adf8b25c0f9fd9 | [
"Apache-2.0",
"bzip2-1.0.6"
] | 2 | 2019-02-14T08:13:33.000Z | 2019-04-23T21:47:48.000Z | import hashlib
import logging
import os
import re
from xml.dom import minidom
import six
import requests
from bs4 import BeautifulSoup
from pyquery import PyQuery
logger = logging.getLogger(__name__)
uri_scheme = 'https'
api_uri = 'wikipedia.org/w/api.php'
article_uri = 'wikipedia.org/wiki/'
# common sub sections to exclude from output
UNWANTED_SECTIONS = (
'External links and resources',
'External links',
'Navigation menu',
'See also',
'References',
'Further reading',
'Contents',
'Official',
'Other',
'Notes',
)
class WikiApi(object):
def __init__(self, options=None):
if options is None:
options = {}
self.options = options
if 'locale' not in options:
self.options['locale'] = 'en'
self.caching_enabled = True if options.get('cache') is True else False
self.cache_dir = options.get('cache_dir') or '/tmp/wikiapicache'
def find(self, terms):
search_params = {
'action': 'opensearch',
'search': terms,
'format': 'xml'
}
url = "{scheme}://{locale_sub}.{hostname_path}".format(
scheme=uri_scheme,
locale_sub=self.options['locale'],
hostname_path=api_uri
)
resp = self.get(url, search_params)
logger.debug('find "%s" response: %s', terms, resp)
# parse search results
xmldoc = minidom.parseString(resp)
items = xmldoc.getElementsByTagName('Item')
# return results as wiki page titles
results = []
for item in items:
link = item.getElementsByTagName('Url')[0].firstChild.data
slug = re.findall(r'wiki/(.+)', link, re.IGNORECASE)
results.append(slug[0])
return results
def get_article(self, title):
url = '{scheme}://{locale_sub}.{hostname_path}{article_title}'.format(
scheme=uri_scheme,
locale_sub=self.options['locale'],
hostname_path=article_uri,
article_title=title
)
html = PyQuery(self.get(url))
data = {}
# parse wiki data
data['heading'] = html('#firstHeading').text()
paras = html('.mw-content-ltr').find('p')
data['image'] = 'http:{0}'.format(
html('body').find('.image img').attr('src'))
data['summary'] = ""
data['full'] = ""
references = html('body').find('.references')
data['url'] = url
# gather references
data['references'] = []
for ref in references.items():
data['references'].append(self._strip_text(ref.text()))
# gather summary
summary_max = 900
chars = 0
for pgraph in paras.items():
if chars < summary_max:
chars += len(pgraph.text())
text_no_tags = self._strip_html(pgraph.outer_html())
stripped_summary = self._strip_text(text_no_tags)
data['summary'] += stripped_summary
# gather full content
for idx, line in enumerate(html('body').find('h2, p').items()):
if idx == 0:
data['full'] += data['heading']
clean_text = self._strip_text(line.text())
if clean_text:
data['full'] += '\n\n' + clean_text
data['full'] = self._remove_ads_from_content(data['full']).strip()
article = Article(data)
return article
@staticmethod
def _strip_html(text): # pragma: no cover
return BeautifulSoup(text).text
def get_relevant_article(self, results, keywords):
"""
Get the most relevant article from the results of find(),
using a list of keywords and checking for them in article.summary
"""
for result in results:
article = self.get_article(result)
summary_words = article.summary.split(' ')
has_words = any(word in summary_words for word in keywords)
if has_words:
return article
return None
def _get_cache_item_path(self, url, params):
"""
Generates a cache location for a given api call.
Returns a file path
"""
cache_dir = self.cache_dir
m = hashlib.md5()
hash_str = '{0}{1}'.format(six.text_type(url), six.text_type(params))
m.update(hash_str.encode('utf-8'))
cache_key = m.hexdigest()
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
return os.path.join(cache_dir, cache_key + '.cache')
def _get_cached_response(self, file_path):
""" Retrieves response from cache """
if os.path.exists(file_path):
logger.info('retrieving from WikiApi cache: %s', file_path)
with open(file_path, 'rb') as resp_data:
# import pytest; pytest.set_trace()
cached_resp = resp_data.read()
return cached_resp
@staticmethod
def _cache_response(file_path, resp):
with open(file_path, 'wb') as f:
f.write(resp)
def get(self, url, params={}):
if self.caching_enabled:
cached_item_path = self._get_cache_item_path(
url=url,
params=params
)
cached_resp = self._get_cached_response(cached_item_path)
if cached_resp:
return cached_resp
resp = requests.get(url, params=params)
resp_content = resp.content
if self.caching_enabled:
self._cache_response(cached_item_path, resp_content)
return resp_content
def _strip_text(self, string):
"""Removed unwanted information from article test"""
# remove citation numbers
string = re.sub(r'\[\d+]', '', string)
# correct spacing around fullstops + commas
string = re.sub(r' +[.] +', '. ', string)
string = re.sub(r' +[,] +', ', ', string)
# remove sub heading edits tags
string = re.sub(r'\s*\[\s*edit\s*\]\s*', '\n', string)
# remove unwanted areas
string = re.sub(
'|'.join(UNWANTED_SECTIONS), '', string, re.I | re.M | re.S
)
return string
@staticmethod
def _remove_ads_from_content(bio_text):
"""Returns article content without references to Wikipedia"""
pattern = r'([^.]*?Wikipedia[^.]*\.)'
return re.sub(pattern, '', bio_text)
class Article(object):
def __init__(self, data=None):
data = data or {}
self.heading = data.get('heading')
self.image = data.get('image')
self.summary = data.get('summary')
self.content = data.get('full')
self.references = data.get('references')
self.url = data.get('url')
def __repr__(self):
return '<wikiapi.Article {0}>'.format(self.heading)
| 31.381818 | 78 | 0.57445 |
14ad82b116c5293cb9cecef801eaf6a5fba4b189 | 839 | py | Python | setup.py | pjflanagan/nightpro-for-gopro | a6a51e71083b871474bf2f4367cf16b473003a41 | [
"MIT"
] | 1 | 2020-12-05T18:19:17.000Z | 2020-12-05T18:19:17.000Z | setup.py | pjflanagan/nightpro-for-gopro | a6a51e71083b871474bf2f4367cf16b473003a41 | [
"MIT"
] | null | null | null | setup.py | pjflanagan/nightpro-for-gopro | a6a51e71083b871474bf2f4367cf16b473003a41 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open('README.md', 'r') as fh:
long_description = fh.read()
setup(
name='nightpro-for-gopro',
version='1.0.1',
url='https://github.com/pjflanagan/nightpro-for-gopro',
author='Peter James Flanagan',
author_email='pj@pjflanagan.me',
description='A nightlapse maker for GoPro',
long_description=long_description,
long_description_content_type='text/markdown',
license='MIT',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
python_requires='>=3.6',
packages=find_packages(),
entry_points={
'console_scripts': [
'nightpro = nightpro.main:main'
]
},
keywords='GoPro nightlapse video'
)
| 27.966667 | 59 | 0.640048 |
3facea7f06e3d7f63ff00a3d0c2ebbc90bc10e9c | 807 | py | Python | danbot_api/errors.py | Makiyu-py/danbot-hosting0 | 654573105a3dafff417bb93f3abe7df817efbee3 | [
"MIT"
] | 3 | 2021-02-28T05:40:46.000Z | 2021-10-31T14:25:15.000Z | danbot_api/errors.py | Makiyu-py/danbot-hosting0 | 654573105a3dafff417bb93f3abe7df817efbee3 | [
"MIT"
] | 4 | 2021-02-26T11:10:37.000Z | 2022-03-21T11:45:46.000Z | danbot_api/errors.py | Makiyu-py/danbot-hosting0 | 654573105a3dafff417bb93f3abe7df817efbee3 | [
"MIT"
] | 3 | 2021-06-16T13:31:35.000Z | 2022-03-31T17:13:58.000Z | class DBHException(Exception):
""" Base exception class for this module
So all of the errors/exceptions below/after this would be a subclass of this
"""
pass
class NotAllowed(DBHException):
""" Error when you give an invalid dbh API key
"""
pass
class HTTPException(DBHException):
""" The exception for the http errors
"""
pass
class ServerError(HTTPException):
""" Gives you this error when the response status is higher than 500
"""
def __init__(self, status_code: int):
super().__init__(f"DanBot Hosting Server Error. Status Code: {status_code}")
class APIError(HTTPException):
"""Given when the API itself gives out an error
"""
def __init__(self, msg):
super().__init__(f"The API has given you an error: {msg}")
| 23.735294 | 84 | 0.670384 |
296f03671b5d981f418a75e4d5e7bd12056d7728 | 76 | py | Python | venv/Lib/site-packages/psychopy/tests/test_psychojs/__init__.py | mintzer/pupillometry-rf-back | cfa86fa984a49dce0123798f8de5b838c02e10d5 | [
"CC-BY-4.0"
] | null | null | null | venv/Lib/site-packages/psychopy/tests/test_psychojs/__init__.py | mintzer/pupillometry-rf-back | cfa86fa984a49dce0123798f8de5b838c02e10d5 | [
"CC-BY-4.0"
] | null | null | null | venv/Lib/site-packages/psychopy/tests/test_psychojs/__init__.py | mintzer/pupillometry-rf-back | cfa86fa984a49dce0123798f8de5b838c02e10d5 | [
"CC-BY-4.0"
] | null | null | null | """Testing PsychoJS, but also see test_app/test_builder/test_compileJS.py""" | 76 | 76 | 0.802632 |
246b4d15ae59db079ad14f9499b124347b5790be | 188 | py | Python | imow/common/exceptions.py | ChrisHaPunkt/stihl-imow-webapi | 8d2f57c4e992a6bb57d592009f464326acb9a5a1 | [
"MIT"
] | 10 | 2021-05-15T21:12:43.000Z | 2022-02-01T11:11:48.000Z | imow/common/exceptions.py | ChrisHaPunkt/stihl-imow-webapi | 8d2f57c4e992a6bb57d592009f464326acb9a5a1 | [
"MIT"
] | 4 | 2021-05-15T22:51:05.000Z | 2021-06-17T09:41:49.000Z | imow/common/exceptions.py | ChrisHaPunkt/stihl-imow-webapi | 8d2f57c4e992a6bb57d592009f464326acb9a5a1 | [
"MIT"
] | 5 | 2021-05-26T14:01:03.000Z | 2021-06-16T14:50:17.000Z | class LoginError(Exception):
pass
class ApiMaintenanceError(Exception):
pass
class MessageNotFoundError(Exception):
pass
class LanguageNotFoundError(Exception):
pass
| 12.533333 | 39 | 0.755319 |
4549f946de218dba0de4a702125ddb423f06ab31 | 5,235 | py | Python | egg_mayavi.py | lengyuner/fly_video_analysis | 74999d2d91b4ae53f8896270691384674cf022b3 | [
"Apache-2.0"
] | null | null | null | egg_mayavi.py | lengyuner/fly_video_analysis | 74999d2d91b4ae53f8896270691384674cf022b3 | [
"Apache-2.0"
] | null | null | null | egg_mayavi.py | lengyuner/fly_video_analysis | 74999d2d91b4ae53f8896270691384674cf022b3 | [
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import sympy
import numpy as np
from mayavi import mlab
# from sympy import *
# from mayavi.mlab import *
# def Demo():
# x, y, z = np.mgrid[0:1:20j, 0:1:20j, 0:1:20j]
#
# u = -y
# v = x
# w = z # 矢量场三坐标分量表达式
#
# x = sympy.Symbol('x') # 引入符合x
# expr = sympy.sin(x) / x # 表达式形式
# f = sympy.lambdify(x, expr, "numpy") # 表达式中x符合用数组代替
# data = np.linspace(1, 10, 10000) # 取数组1至10,线性分割1000份
#
# print(data)
# print(f(data)) # 将数组data带入表达式
#
# mlab.quiver3d(u, v, w) # 绘制矢量场
# mlab.outline() # 绘制边框
def test_flow():
    x, y, z = np.mgrid[-5:5:40j, -5:54:40j, 0:4:20j]  # mesh x, y, z into filled 3D coordinate arrays
    u = y  # x component of the vector field
    v = -x  # y component of the vector field
    w = np.ones_like(z) * 0.05  # array filled with ones, scaled  # z component of the vector field
    mlab.quiver3d(u, v, w, mask_points=10)  # draw the vector field
    obj = mlab.flow(u, v, w)  # place a movable object in the vector field to inspect the flow
    return obj
test_flow()
# mlab.quiver3d(u, v, w) # 绘制矢量场
# mlab.outline() # 绘制边框
# test_flow()
def draw_speed_mayavi(stream_map, n_sacle=3, plt_density=5):
# max(stream_map[:, 1])
# max(stream_map[:, 2])
max_x = int(max(stream_map[:, 1]) / n_sacle) + 1
max_y = int(max(stream_map[:, 2]) / n_sacle) + 1
if abs(max_x - max_y) > 2:
        print('x and y have not been modified. break.')
# return None
else:
max_x_y = max(max_x, max_y)
speed_x_streamplot = np.zeros([max_x_y, max_x_y])
speed_y_streamplot = np.zeros([max_x_y, max_x_y])
speed_count_streamplot = np.ones([max_x_y, max_x_y])
for K_0 in range(len(stream_map)):
# y = stream_map[K_0, 2] / n_sacle
# y = (max(orentation_np[:, 2]) - orentation_np[K_0, 2]) / n_sacle
x = stream_map[K_0, 1] / n_sacle
y = (max(stream_map[:, 2]) - stream_map[K_0, 2]) / n_sacle
speed_x_streamplot[int(y), int(x)] += stream_map[K_0, 3]
speed_y_streamplot[int(y), int(x)] -= stream_map[K_0, 4]
# speed_y_streamplot[int(y), int(x)] += stream_map[K_0, 4]
speed_count_streamplot[int(y), int(x)] += 1
# for K_1 in range(speed_x_streamplot.shape[0]):
# for K_2
speed_x_streamplot /= speed_count_streamplot
speed_y_streamplot /= speed_count_streamplot
print(speed_x_streamplot[:4, :4])
print(speed_x_streamplot.shape)
# w = 3
Y, X = np.mgrid[0:max_x_y, 0:max_x_y]
# U = -1 - X ** 2 + Y
# V = 1 + X - Y ** 2
# speed = np.sqrt(U ** 2 + V ** 2)
# fig = plt.figure()
U = speed_x_streamplot
V = speed_y_streamplot
plt.figure()
plt.streamplot(X, Y, U, V, density=[plt_density, plt_density])
# plt.set_title('Speed')
plt.title('Speed')
return None
video_name = '../data/video_CS_20201031_h_0_to_h_13/video_CS_20201031_h_0_to_h_13_552_713_239_447_4.avi'
position_name = video_name[:-4] + '_position.npy'
position_np = np.load(position_name)
print(position_np.shape)
from egg_streamplot import get_speed
speed_np = get_speed(position_np, distance_threshold=10,frame_interval=10, save_interval=100)
stream_map = np.copy(speed_np)
print(stream_map.shape)
stream_map
n_sacle=3
plt_density=5
# max(stream_map[:, 1])
# max(stream_map[:, 2])
max_x = int(max(stream_map[:, 1]) / n_sacle) + 1
max_y = int(max(stream_map[:, 2]) / n_sacle) + 1
if abs(max_x - max_y) > 2:
    print('x and y have not been modified. break.')
# return None
# else:
max_x_y = max(max_x, max_y)
speed_x_streamplot = np.zeros([max_x_y, max_x_y, max_x_y])
speed_y_streamplot = np.zeros([max_x_y, max_x_y, max_x_y])
speed_count_streamplot = np.ones([max_x_y, max_x_y, max_x_y])
len_peroid = int(len(stream_map/max_x_y))+1
for K_0 in range(max_x_y):#len(stream_map)):
# y = stream_map[K_0, 2] / n_sacle
# y = (max(orentation_np[:, 2]) - orentation_np[K_0, 2]) / n_sacle
x = stream_map[K_0, 1] / n_sacle
y = (max(stream_map[:, 2]) - stream_map[K_0, 2]) / n_sacle
# for K_1 in range(len_peroid):
# speed_x_streamplot[int(y), int(x), K_1] += stream_map[K_0, 3]
# speed_y_streamplot[int(y), int(x), K_1] -= stream_map[K_0, 4]
# speed_count_streamplot[int(y), int(x), K_0] += 1
speed_x_streamplot[int(y), int(x), K_0] += stream_map[K_0, 3]
speed_y_streamplot[int(y), int(x), K_0] -= stream_map[K_0, 4]
# speed_y_streamplot[int(y), int(x)] += stream_map[K_0, 4]
speed_count_streamplot[int(y), int(x), K_0] += 1
# for K_1 in range(speed_x_streamplot.shape[0]):
# for K_2
speed_x_streamplot /= speed_count_streamplot
speed_y_streamplot /= speed_count_streamplot
print(speed_x_streamplot[:4, :4,:4])
print(speed_x_streamplot.shape)
# w = 3
Y, X, Z = np.mgrid[0:max_x_y, 0:max_x_y, 0:len(stream_map)]
# U = -1 - X ** 2 + Y
# V = 1 + X - Y ** 2
# speed = np.sqrt(U ** 2 + V ** 2)
# fig = plt.figure()
U = speed_x_streamplot
V = speed_y_streamplot
# W = np.zeros([max_x_y, max_x_y, len(stream_map)])
W = np.ones([max_x_y, max_x_y, max_x_y])
mlab.quiver3d(U, V, W, mask_points=10) # 绘制矢量场
obj = mlab.flow(U, V, W)
plt.figure()
plt.streamplot(X, Y, Z, U, V, W, density=[plt_density, plt_density])
# plt.set_title('Speed')
plt.title('Speed')
| 26.984536 | 104 | 0.627507 |
6c7c76cfd8db1036d1c2673c5e5317b26f8d89ec | 4,343 | py | Python | train_models/wrn16_8_stl_d8d4d1/train_model.py | kaikai581/petals-to-the-metal | ec629c954bc46fcc4641d005415485ddf3c04498 | [
"MIT"
] | null | null | null | train_models/wrn16_8_stl_d8d4d1/train_model.py | kaikai581/petals-to-the-metal | ec629c954bc46fcc4641d005415485ddf3c04498 | [
"MIT"
] | null | null | null | train_models/wrn16_8_stl_d8d4d1/train_model.py | kaikai581/petals-to-the-metal | ec629c954bc46fcc4641d005415485ddf3c04498 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
'''
This script trains one of the best performing networks from the paper, wrn28 without reflection symmetry.
Since this network is optimized for the STL dataset, images are resized to 96x96 from the original size 224x224.
'''
import os, sys
from torchvision.transforms.transforms import Resize
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../..'))
from torchvision import transforms
from utilities.e2wrn import wrn16_8_stl_d8d4d1
from utilities.helpers import *
import argparse
if __name__ == '__main__':
# command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--load_weights', type=str, default='models/wrn16_8_stl_d8d4d1_lr1.0e-03_epoch2.pt')
parser.add_argument('-lr', '--learning_rate', type=float, default=1e-3)
parser.add_argument('-n', '--nepochs', type=int, default=25)
parser.add_argument('-r', '--resize_pixel', type=int, default=96)
parser.add_argument('-s', '--scheduler_step', type=int, default=50, help='Number of epochs for the scheduler to kick in. No effect with ADAM.')
parser.add_argument('--use_adam', action='store_true')
args = parser.parse_args()
pretrained_weights_fpn = args.load_weights
nepochs = args.nepochs
img_size = args.resize_pixel
learning_rate = args.learning_rate
scheduler_step = args.scheduler_step
use_adam = args.use_adam
# get the remaining number of epochs
pretrain_epochs = 0
if os.path.exists(pretrained_weights_fpn):
for tmpstr in os.path.splitext(pretrained_weights_fpn)[0].split('_'):
if 'epoch' in tmpstr:
pretrain_epochs = int(tmpstr.lstrip('epoch'))
remaining_epochs = nepochs - pretrain_epochs
# Data augmentation and normalization for training
data_transforms = {
'train': transforms.Compose([
transforms.Resize(img_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
'val': transforms.Compose([
transforms.Resize(img_size),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
]),
}
# print device information
print('Device being used:', torch.cuda.get_device_name(0))
# preview several images
visualize_images(data_transforms)
print('Some image previews are saved to plots/preview_data.png.')
# It turns out constructing a e2cnn model takes a lot of time. (~10 minutes)
e2cnn_model1 = wrn16_8_stl_d8d4d1(num_classes=get_nclasses(data_transforms))
e2cnn_model1 = e2cnn_model1.to(device)
# if model is not trained, train it
# otherwise, evaluate the model
model_fpn = 'models/{}x{}/wrn16_8_stl_d8d4d1_lr{:1.1e}_{}_epoch{}.pt'.format(img_size, img_size, learning_rate, 'adam' if use_adam else 'sgd', nepochs)
if not os.path.exists(model_fpn):
loss_function = torch.nn.CrossEntropyLoss()
# Observe that all parameters are being optimized
if not use_adam:
optimizer_ft = torch.optim.SGD(e2cnn_model1.parameters(), lr=learning_rate, momentum=0.9)
else:
optimizer_ft = torch.optim.Adam(e2cnn_model1.parameters(), lr=learning_rate)
# Decay LR by a factor of 0.1 every 7 epochs
if not use_adam:
exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=scheduler_step, gamma=0.1)
else:
exp_lr_scheduler = None
# load pretrained weights if exist
if os.path.exists(pretrained_weights_fpn):
e2cnn_model1.load_state_dict(torch.load(pretrained_weights_fpn))
###
### Train and evaluate
###
e2cnn_model1 = train_model(e2cnn_model1, loss_function, optimizer_ft, exp_lr_scheduler, data_transforms,
num_epochs=remaining_epochs, start_epoch=pretrain_epochs)
# save the trained model
easy_savemodel(e2cnn_model1, model_fpn)
else:
e2cnn_model1.load_state_dict(torch.load(model_fpn))
e2cnn_model1.eval()
# show some plots with prediction
visualize_model(e2cnn_model1, data_transforms)
easy_savefig(outfpn='plots/visualize_model.png')
| 41.361905 | 155 | 0.683168 |
8530aa9e38f3dcf6d035d4a2392497582bc4f9aa | 3,919 | py | Python | lldb_commands/yoink.py | lanza/ds-lldb | fdab1addb05c571460ad31b3256688a223a5022a | [
"MIT"
] | null | null | null | lldb_commands/yoink.py | lanza/ds-lldb | fdab1addb05c571460ad31b3256688a223a5022a | [
"MIT"
] | null | null | null | lldb_commands/yoink.py | lanza/ds-lldb | fdab1addb05c571460ad31b3256688a223a5022a | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2017 Derek Selander
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import lldb
import os
import shlex
import optparse
def __lldb_init_module(debugger, internal_dict):
debugger.HandleCommand(
'command script add -f yoink.yoink yoink -h "Copies contents of remote contents to local computer"')
def yoink(debugger, command, exe_ctx, result, internal_dict):
'''
Takes a path on a iOS/tvOS/watchOS and writes to the /tmp/ dir on your computer.
If it can be read by -[NSData dataWithContentsOfFile:], it can be written to disk
Example (on iOS 10 device):
yoink /System/Library/Messages/iMessageEffects/CKConfettiEffect.bundle/CKConfettiEffect
'''
command_args = shlex.split(command)
parser = generate_option_parser()
try:
(options, args) = parser.parse_args(command_args)
except:
result.SetError(parser.usage)
return
clean_command = ('').join(args)
command_script = '''expression -lobjc -O -- @import Foundation; id data = [NSData dataWithContentsOfFile:@\"{}\"];
[NSString stringWithFormat:@\"%p,%p,%p\", data, (uintptr_t)[data bytes], (uintptr_t)[data length] + (uintptr_t)[data bytes]]'''.format(clean_command)
res = lldb.SBCommandReturnObject()
interpreter = debugger.GetCommandInterpreter()
interpreter.HandleCommand(command_script, res)
if not res.HasResult():
result.SetError('There\'s no result')
return
response = res.GetOutput().split(',')
    if len(response) != 3:
        result.SetError('Bad Formatting')
        return
    if int(response[0], 16) == 0:
result.SetError('Couldn\'t open file {}'.format(clean_command))
return
basename = os.path.basename(clean_command)
debugger.HandleCommand(
'memory read {} {} -r -b -o /tmp/{}'.format(response[1], response[2], basename))
interpreter.HandleCommand('po [{} dealloc]'.format(response[0]), res)
fullpath = '/tmp/{}'.format(basename)
if options.open_immediately:
print('Opening file...')
os.system('open \"{}\"'.format(fullpath))
else:
os.system('open -R ' + fullpath)
print('Opening \"{}\"...'.format(fullpath))
def generate_option_parser():
usage = "usage: %prog [options] path/to/item"
parser = optparse.OptionParser(usage=usage, prog="yoink")
parser.add_option("-o", "--open_immediately",
action="store_true",
default=False,
dest="open_immediately",
help="Opens the copied item immediately using the default 'open' cmd, useful for pics")
parser.add_option("-c", "--copy_file_path",
action="store_true",
default=False,
dest="copy_file_path",
help="Copies the file path to the clipboard")
return parser
| 37.682692 | 149 | 0.674407 |
70fb6d81708e7f2fd64c7ee50b29fe9382634eb6 | 3,836 | py | Python | lambdausb/boards/usbsniffer.py | zignig/lambdaUSB | 70e01e9d705e4d18e145f5e3f50ff7216c24e101 | [
"BSD-2-Clause"
] | null | null | null | lambdausb/boards/usbsniffer.py | zignig/lambdaUSB | 70e01e9d705e4d18e145f5e3f50ff7216c24e101 | [
"BSD-2-Clause"
] | null | null | null | lambdausb/boards/usbsniffer.py | zignig/lambdaUSB | 70e01e9d705e4d18e145f5e3f50ff7216c24e101 | [
"BSD-2-Clause"
] | null | null | null | from nmigen.build import *
import os
import subprocess

from nmigen.vendor.xilinx_7series import *
__all__ = ["USBSnifferPlatform"]
class USBSnifferPlatform(Xilinx7SeriesPlatform):
device = "xc7a35t"
package = "fgg484"
speed = "2"
default_clk = "clk100"
resources = [
Resource("clk100", 0, Pins("J19", dir="i"), Clock(100e6), Attrs(IOSTANDARD="LVCMOS33")),
Resource("rgb_led", 0,
Subsignal("r", PinsN("W2", dir="o")),
Subsignal("g", PinsN("Y1", dir="o")),
Subsignal("b", PinsN("W1", dir="o")),
Attrs(IOSTANDARD="LVCMOS33")
),
Resource("rgb_led", 1,
Subsignal("r", PinsN("AA1", dir="o")),
Subsignal("g", PinsN("AB1", dir="o")),
Subsignal("b", PinsN("Y2" , dir="o")),
Attrs(IOSTANDARD="LVCMOS33")
),
Resource("serial", 0,
Subsignal("tx", Pins("U21", dir="o")), # FPGA_GPIO0
Subsignal("rx", Pins("T21", dir="i")), # FPGA_GPIO1
Attrs(IOSTANDARD="LVCMOS33")
),
Resource("ulpi_sw", 0,
Subsignal("s", Pins("Y8", dir="o")),
Subsignal("oe", PinsN("Y9", dir="o")),
Attrs(IOSTANDARD="LVCMOS33")
),
Resource("ulpi", 0,
Subsignal("clk", Pins("W19", dir="i"), Clock(60e6)),
Subsignal("data", Pins("AB18 AA18 AA19 AB20 AA20 AB21 AA21 AB22", dir="io")),
Subsignal("dir", Pins("W21", dir="i")),
Subsignal("stp", Pins("Y22", dir="o")),
Subsignal("nxt", Pins("W22", dir="i")),
Subsignal("rst", Pins("V20", dir="o")),
Attrs(IOSTANDARD="LVCMOS33", SLEW="FAST")
),
Resource("ulpi", 1,
Subsignal("clk", Pins("V4", dir="i"), Clock(60e6)),
Subsignal("data", Pins("AB2 AA3 AB3 Y4 AA4 AB5 AA5 AB6", dir="io")),
Subsignal("dir", Pins("AB7", dir="i")),
Subsignal("stp", Pins("AA6", dir="o")),
Subsignal("nxt", Pins("AB8", dir="i")),
Subsignal("rst", Pins("AA8", dir="o")),
Attrs(IOSTANDARD="LVCMOS33", SLEW="FAST")
),
Resource("ft601", 0,
Subsignal("clk", Pins("D17", dir="i"), Clock(100e6)),
Subsignal("rst", Pins("K22", dir="o")),
Subsignal("data", Pins("A16 F14 A15 F13 A14 E14 A13 E13 B13 C15 C13 C14 B16 E17 B15 F16 "
"A20 E18 B20 F18 D19 D21 E19 E21 A21 B21 A19 A18 F20 F19 B18 B17", dir="io")),
Subsignal("be", Pins("K16 L16 G20 H20", dir="o")),
Subsignal("rxf_n", Pins("M13", dir="i")),
Subsignal("txe_n", Pins("L13", dir="i")),
Subsignal("rd_n", Pins("K19", dir="o")),
Subsignal("wr_n", Pins("M15", dir="o")),
Subsignal("oe_n", Pins("L21", dir="o")),
Subsignal("siwua", Pins("M16", dir="o")),
Attrs(IOSTANDARD="LVCMOS33", SLEW="FAST")
)
]
connectors = []
def toolchain_prepare(self, fragment, name, **kwargs):
overrides = {
"script_before_bitstream":
"set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]",
"script_after_bitstream":
"write_cfgmem -force -format bin -interface spix4 -size 16 "
"-loadbit \"up 0x0 {name}.bit\" -file {name}.bin".format(name=name),
"add_constraints":
"set_property INTERNAL_VREF 0.675 [get_iobanks 34]"
}
return super().toolchain_prepare(fragment, name, **overrides, **kwargs)
def toolchain_program(self, products, name):
xc3sprog = os.environ.get("XC3SPROG", "xc3sprog")
with products.extract("{}.bit".format(name)) as bitstream_filename:
subprocess.run([xc3sprog, "-c", "ft4232h", bitstream_filename], check=True)
| 39.958333 | 113 | 0.52268 |
061ac6047c8f655f558e6d99d6b544ef851810ac | 862 | py | Python | tests/test_getitem.py | idfah/regenerator | 933e690b122df86b1d4b8404a47ea227eb478f4c | [
"BSD-2-Clause"
] | 3 | 2020-12-11T21:42:52.000Z | 2021-05-27T16:50:12.000Z | tests/test_getitem.py | idfah/regenerator | 933e690b122df86b1d4b8404a47ea227eb478f4c | [
"BSD-2-Clause"
] | null | null | null | tests/test_getitem.py | idfah/regenerator | 933e690b122df86b1d4b8404a47ea227eb478f4c | [
"BSD-2-Clause"
] | 1 | 2020-12-17T15:51:49.000Z | 2020-12-17T15:51:49.000Z | import pytest
from regenerator import Stream
from .util import *
def test_getitem_idx(list_stream):
item = list_stream[3]
assert item == 3
def test_negative_idx(list_stream):
with pytest.raises(IndexError):
list_stream[-1]
def test_str_idx(list_stream):
with pytest.raises(TypeError):
list_stream['junk']
def test_idx_out_of_range(list_stream):
with pytest.raises(IndexError):
list_stream[100_000]
def test_slice(list_stream):
slice_list = list_stream[2:4]
assert isinstance(slice_list, Stream)
assert len(slice_list) == 2
def test_empty(empty_stream):
with pytest.raises(IndexError):
empty_stream[0]
def test_custom_type(custom_stream, custom_stream_class):
slice_list = custom_stream[:10]
assert isinstance(slice_list, custom_stream_class)
assert len(slice_list) == 10
| 22.684211 | 57 | 0.729698 |
6cd9b1d1fb5f1a9e960259a8076e0efcce79a749 | 2,156 | py | Python | userbot/utils/tools.py | syiibamir/syiibamir | 6cab01d174671bb0f4233fd112ac025ec02a6e03 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 6 | 2021-01-10T13:51:35.000Z | 2022-02-28T20:25:55.000Z | userbot/utils/tools.py | INDOHACKER-XODE/ajib | 053603a1ce8fa122e3b36318d94b0de7d8cb1e52 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4 | 2021-12-19T22:45:50.000Z | 2021-12-19T22:45:50.000Z | userbot/utils/tools.py | INDOHACKER-XODE/ajib | 053603a1ce8fa122e3b36318d94b0de7d8cb1e52 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 68 | 2020-11-04T14:22:09.000Z | 2022-03-07T14:46:37.000Z | # Copyright (C) 2020 Adek Maulana
#
# SPDX-License-Identifier: GPL-3.0-or-later
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
import re
async def md5(fname: str) -> str:
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def humanbytes(size: int) -> str:
if size is None or isinstance(size, str):
return ""
power = 2 ** 10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(seconds: int) -> str:
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = (
((str(days) + " day(s), ") if days else "")
+ ((str(hours) + " hour(s), ") if hours else "")
+ ((str(minutes) + " minute(s), ") if minutes else "")
+ ((str(seconds) + " second(s), ") if seconds else "")
)
return tmp[:-2]
def human_to_bytes(size: str) -> int:
units = {
"M": 2 ** 20,
"MB": 2 ** 20,
"G": 2 ** 30,
"GB": 2 ** 30,
"T": 2 ** 40,
"TB": 2 ** 40,
}
size = size.upper()
if not re.match(r" ", size):
size = re.sub(r"([KMGT])", r" \1", size)
number, unit = [string.strip() for string in size.split()]
return int(float(number) * units[unit])
| 30.366197 | 72 | 0.597866 |
d7b7bad963f68e3d27f28b6289a6be7465093847 | 6,702 | py | Python | apps/osis2/app/runner.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | null | null | null | apps/osis2/app/runner.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | 4 | 2016-08-25T12:08:39.000Z | 2018-04-12T12:36:01.000Z | apps/osis2/app/runner.py | rudecs/jumpscale_core7 | 30c03f26f1cdad3edbb9d79d50fbada8acc974f5 | [
"Apache-2.0"
] | 3 | 2016-03-08T07:49:34.000Z | 2018-10-19T13:56:43.000Z | from JumpScale import j
from JumpScale.baselib import cmdutils
import os
import sys
import swagger
from eve import Eve
from eve.utils import config
from eve.render import send_response
from eve_docs import eve_docs
from eve_docs.config import get_cfg
from eve_sqlalchemy import SQL as _SQL
from eve_sqlalchemy.validation import ValidatorSQL
from eve.io.base import ConnectionException  # raised in SQL.init_app below
from werkzeug.wsgi import DispatcherMiddleware
from werkzeug.serving import run_simple
import flask.ext.sqlalchemy as flask_sqlalchemy
from . import modelloader
from . import api
from .converters import osis2mongo
from .converters import osis2sqlalchemy
from .sqlalchemy import common
import urlparse
import importlib
import copy
STATIC_PATH = '/opt/jumpscale7/jslib/swagger/'
HOOKSMAP = {'on_insert_%s': 'pre_create',
'on_inserted_%s': 'post_create',
'on_fetched_%s': 'get',
'on_replace_%s': 'pre_update',
'on_replaced_%s': 'post_update',
'on_update_%s': 'pre_partial_update',
'on_updated_%s': 'post_partial_update',
'on_delete_%s': 'pre_delete',
'on_deleted_%s': 'post_delete',
'on_fetched_item_%s': 'get',
}
ID_FIELD = 'guid'
ITEM_LOOKUP_FIELD = ID_FIELD
config.ID_FIELD = ID_FIELD
config.ITEM_LOOKUP_FIELD = ID_FIELD
BASE_SETTINGS = {
'DEBUG': True,
'ID_FIELD': ID_FIELD,
'IF_MATCH':False,
'ITEM_URL': 'regex("[a-f0-9\-]+")',
'ITEM_LOOKUP_FIELD': ITEM_LOOKUP_FIELD,
'RESOURCE_METHODS': ['GET', 'POST'],
'ITEM_METHODS': ['GET', 'PATCH', 'PUT', 'DELETE'],
'X_DOMAINS': '*',
'X_HEADERS': ["X-HTTP-Method-Override", 'If-Match'],
'PAGINATION_LIMIT': 100000
}
BASE_MONGO_SETTINGS = copy.deepcopy(BASE_SETTINGS)
BASE_MONGO_SETTINGS['MONGO_HOST'] = '127.0.0.1'
BASE_MONGO_SETTINGS['MONGO_PORT'] = 27017
def hookevents(namespace, app):
# Get all events per namespace
modulename = 'models.hooks.%s' % namespace
namespace_hooks = {}
try:
module = importlib.import_module(modulename)
for _, hookmethod in HOOKSMAP.iteritems():
if hasattr(module, hookmethod):
namespace_hooks[hookmethod] = getattr(module, hookmethod)
        print 'Finished loading global namespace hooks for namespace %s' % namespace
except ImportError:
pass
for model in app.settings['DOMAIN'].keys():
modulename = 'models.hooks.%s.%s' % (namespace, model)
module = None
try:
module = importlib.import_module(modulename)
except ImportError:
pass
for dbevent, hookmethod in HOOKSMAP.iteritems():
evehook = dbevent % model
hook = getattr(app, evehook)
if hookmethod in namespace_hooks:
hook += namespace_hooks[hookmethod]
if module and hasattr(module, hookmethod):
hook += getattr(module, hookmethod)
print 'Finished Loading hooks for module %s' % modulename
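# Sketch of the hooks convention assumed above (illustrative, not part of this
# file): a module at models/hooks/<namespace>.py or
# models/hooks/<namespace>/<model>.py only has to define functions named after
# the HOOKSMAP values; hookevents() attaches each one to the matching Eve
# event, so every function takes whatever arguments that Eve event passes.
#
#   def pre_create(items):
#       # bound to Eve's on_insert_<model>: items is the list of new documents
#       for item in items:
#           item.setdefault('comment', '')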
def prepare_mongoapp(namespace, models):
dbname = namespace if namespace != 'system' else 'js_system'
my_settings = copy.deepcopy(BASE_MONGO_SETTINGS)
my_settings['MONGO_DBNAME'] = dbname
my_settings['MONGO_QUERY_BLACKLIST'] = []
my_settings['DOMAIN'] = osis2mongo.generateDomain(namespace, models)
# init application
app = Eve('osis', settings=my_settings, static_url_path=STATIC_PATH)
swagger.expose_docs(app, STATIC_PATH)
hookevents(namespace, app)
return app
def prepare_sqlapp(namespace, models, sqluri, from_spec_file=True):
my_settings = copy.deepcopy(BASE_SETTINGS)
parts = urlparse.urlparse(sqluri)
if parts.scheme == 'sqlite':
j.system.fs.createDir(parts.path)
sqluri = j.system.fs.joinPaths(sqluri, '%s.sqlite' % namespace)
my_settings['SQLALCHEMY_DATABASE_URI'] = sqluri
my_settings['SQLALCHEMY_ECHO'] = True
my_settings['IF_MATCH'] = False
my_settings['SQLALCHEMY_RECORD_QUERIES'] = True
if from_spec_file:
my_settings['DOMAIN'] = osis2sqlalchemy.generateDomainFromSpecFile(namespace, models)
else:
my_settings['DOMAIN'] = osis2sqlalchemy.generateDomainFromModelFiles(namespace, models)
db = flask_sqlalchemy.SQLAlchemy()
class SQL(_SQL):
driver = db
def init_app(self, app):
try:
# FIXME: dumb double initialisation of the
# driver because Eve sets it to None in __init__
self.driver = db
self.driver.app = app
self.driver.init_app(app)
except Exception as e:
raise ConnectionException(e)
self.register_schema(app)
app = Eve(validator=ValidatorSQL, data=SQL, settings=my_settings, static_url_path=STATIC_PATH)
db = app.data.driver
common.Base.metadata.bind = db.engine
db.Model = common.Base
db.create_all()
swagger.expose_docs(app, STATIC_PATH)
hookevents(namespace, app)
return app
def start(basedir, hrd):
port = hrd.getInt('instance.param.osis2.port')
mongdb_instance = hrd.get('instance.param.osis2.mongodb.connection', '')
sqluri = hrd.get('instance.param.osis2.sqlalchemy.uri', '')
use_reloader = hrd.getBool('instance.param.osis2.use_reloader')
use_debugger = hrd.getBool('instance.param.osis2.use_debugger')
if mongdb_instance:
mongohrd = j.application.getAppInstanceHRD('mongodb_client', mongdb_instance)
        # prepare_mongoapp copies BASE_MONGO_SETTINGS, so the instance address
        # has to be written there, not into BASE_SETTINGS.
        BASE_MONGO_SETTINGS['MONGO_HOST'] = mongohrd.get('instance.param.addr')
        BASE_MONGO_SETTINGS['MONGO_PORT'] = mongohrd.getInt('instance.param.port')
apps = dict()
fullspecs = modelloader.find_model_specs(basedir)
namespaces = []
for type_, specs in fullspecs.iteritems():
for namespace, models in specs.iteritems():
if type_ == 'sql':
app = prepare_sqlapp(namespace, models, sqluri)
else:
app = prepare_mongoapp(namespace, models)
apps['/models/%s' % namespace] = app
namespaces.append(namespace)
# Auto load sqlalchemy models from python files
spaces_models = modelloader.find_model_files(basedir)
for namespace, models in spaces_models.iteritems():
app = prepare_sqlapp(namespace, models, sqluri, False)
apps['/models/%s' % namespace] = app
namespaces.append(namespace)
if apps:
apiapp = api.register_api(namespaces)
apps['/api'] = apiapp
application = DispatcherMiddleware(apiapp, apps)
# let's roll
run_simple('0.0.0.0', port, application, use_reloader=use_reloader, use_debugger=use_debugger)
| 35.648936 | 102 | 0.661892 |
a4642cb5fc4767704ba29ea59775725155344d8c | 14,585 | py | Python | analysis.py | malllabiisc/kg-geometry | d5b40d6795085109da5438cdc1d83d32fd5fc373 | [
"Apache-2.0"
] | 18 | 2018-07-31T06:33:45.000Z | 2021-07-22T11:27:40.000Z | analysis.py | malllabiisc/kg-geometry | d5b40d6795085109da5438cdc1d83d32fd5fc373 | [
"Apache-2.0"
] | 3 | 2018-07-30T02:48:06.000Z | 2021-05-03T07:17:48.000Z | analysis.py | malllabiisc/kg-geometry | d5b40d6795085109da5438cdc1d83d32fd5fc373 | [
"Apache-2.0"
] | 2 | 2018-07-01T08:53:06.000Z | 2018-12-12T05:15:40.000Z | import sys
import os
import argparse
import numpy as np
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
from sklearn.manifold import TSNE
import scipy.stats as scistats
from stats import Stats
from model import Model
from triples import Triples
from util import *
import cPickle as pickle
class Analyser:
def __init__(self, datafile, modelfile, usePR=False):
self.datafile = datafile
self.modelfile = modelfile
self.t = Triples(datafile)
self.model = Model()
self.model.loadModel(modelfile)
self.stats = Stats(self.t, usePR)
self.meanE = self.model.E.mean(axis=0)
self.meanR = self.model.R.mean(axis=0)
def getEntIdxs(self, ranges):
idxs = []
for rankBand, ns in ranges:
idxs.append(self.stats.getEnts(rankBand, ns))
return idxs
def getRelIdxs(self, ranges):
idxs = []
for rankBand, ns in ranges:
idxs.append(self.stats.getRels(rankBand, ns))
return idxs
def entPerf(self, opdir):
#eRanges = [((0,100), nSamples), ((100,500), nSamples), ((500,5000), nSamples), ((5000, -1), nSamples)]
eRanges = [(0,100), (100,500), (500, 5000), (5000, self.t.ne)]
entIndices = []
for rankband in eRanges:
entIndices.append(self.stats.getEnts(rankband, rankband[1]-rankband[0]))
rel_triples = self.t.groupByRelation("test")
ranks = {}
ent_perf = {}
for rel, val in self.model.fpos_test.iteritems():
for idx, (h,t) in enumerate(rel_triples[rel]):
ranks.setdefault(h, {}).setdefault('head', []).append((val['head'][idx], val['tail'][idx]))
ranks.setdefault(t, {}).setdefault('tail', []).append((val['head'][idx], val['tail'][idx]))
all_ranks = []
for rangeIdx, idxSet in enumerate(entIndices):
cur_head_ranks = []
cur_tail_ranks = []
cur_all_ranks = []
for idx in idxSet:
cur_head_ranks.extend(ranks.get(idx, {}).get('head', []))
cur_tail_ranks.extend(ranks.get(idx, {}).get('tail', []))
cur_all_ranks = cur_head_ranks + cur_tail_ranks
all_ranks.extend(cur_all_ranks)
ent_perf[eRanges[rangeIdx]] = {"head" : getPerfFromRanks(np.array(cur_head_ranks, dtype=np.int32)),
"tail": getPerfFromRanks(np.array(cur_tail_ranks, dtype=np.int32)),
"all" : getPerfFromRanks(np.array(cur_all_ranks, dtype=np.int32)),
}
all_perf = getPerfFromRanks(np.array(all_ranks, dtype=np.int32))
outfile = os.path.join(opdir, ".".join(os.path.split(self.modelfile)[1].split(".")[:-1]+["ent_perf","p"]))
with open(outfile, "wb") as fout:
pickle.dump({"ent_perf":ent_perf, "all_perf":all_perf}, fout)
outfile = os.path.join(opdir, ".".join(os.path.split(self.modelfile)[1].split(".")[:-1]+["ent_perf","txt"]))
with open(outfile, "w") as fout:
fout.write("Range\t\tMR\tMRR\tHits@1\tHits@3\tHits@10\tHits@100\n")
for a in eRanges:
perf = ent_perf[a]['all']
line = "%10s\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n" % (str(a), perf['MR'][1], perf['MRR'][1], perf['Hits@1'][1], perf['Hits@3'][1], perf['Hits@10'][1], perf['Hits@100'][1])
fout.write(line)
def relPerf(self, opdir):
rRanges = []
interval = 4
for i in range(0,self.t.nr-1,interval):
rRanges.append(((i,i+interval), interval))
#rRanges = [((0,50), 50), ((50,100), 50), ((100,200), 100), ((200, 500), 300), ((500,self.t.nr), self.t.nr-500)]
#rRanges = [((0,100), 100), ((100,500), 400), ((500,self.t.nr), self.t.nr-500)]
relIndices = self.getRelIdxs(rRanges)
idxSets = []
for rankBand, ns in rRanges:
idxSets.append(self.stats.getRels(rankBand, ns))
rel_perf = {}
all_ranks = self.model.fpos_test
for rangeIdx, idxSet in enumerate(idxSets):
cur_ranks = []
for idx in idxSet:
cur_ranks.extend(all_ranks.get(idx, {}).get('tail', []))
rel_perf[rRanges[rangeIdx][0]] = getPerfFromRanks(np.array(cur_ranks, dtype=np.int32))
#rel_perf.append(getPerfFromRanks(np.array(cur_ranks, dtype=np.int32)))
outfile = os.path.join(opdir, ".".join(os.path.split(self.modelfile)[1].split(".")[:-1]+["rel_perf","p"]))
#outfile = os.path.join(os.path.split(self.modelfile)[0], "rel_perf.p")
with open(outfile, "wb") as fout:
pickle.dump(rel_perf, fout)
#outfile = os.path.join(os.path.split(self.modelfile)[0], "rel_perf.txt")
outfile = os.path.join(opdir, ".".join(os.path.split(self.modelfile)[1].split(".")[:-1]+["rel_perf","txt"]))
with open(outfile, "w") as fout:
fout.write("Range\t\tMR\tMRR\tHits@1\tHits@3\tHits@10\tHits@100\n")
for a,b in rRanges:
perf = rel_perf[a]
line = "%10s\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\t%.2f\n" % (str(a), perf['MR'], perf['MRR'], perf['Hits@1'], perf['Hits@3'], perf['Hits@10'], perf['Hits@100'])
fout.write(line)
def run(self, vectorType, sampleMean, isnormalized, outputdir, showplot):
outputfile = ".".join(os.path.split(self.modelfile)[-1].split(".")[:-1])
#outputfile = outputfile + ".p"
#outputfile = outputfile + ".png"
outputfile = os.path.join(outputdir, outputfile)
if os.path.exists(outputfile):
print "File already exists. Exitting..."
print outputfile
#return
#finalize the set to be analysed
nSamples = 100
#eRanges = [((0,100), nSamples), ((100,500), nSamples), ((500,5000), nSamples), ((5000, 50000), nSamples), ((50000, -1), nSamples)]
eRanges = [((0,100), nSamples), ((100,500), nSamples), ((500,5000), nSamples), ((5000, -1), nSamples)]
entIndices = self.getEntIdxs(eRanges)
rRanges = [((0,100), nSamples), ((100,500), nSamples), ((500,-1), nSamples)]
relIndices = self.getRelIdxs(rRanges)
#colors = ['r','g','b','c']
#colors = "rgbcmykw" #plt.cm.get_cmap("hsv", N)
#legendLabels = ["0-100", "100-500", "500-5000", "5000-"]
legendLabels=[]
for a,b in eRanges:
curLabel = "%d-%d"%(a[0],a[1])
legendLabels.append(curLabel)
#markers = ["+", ".", "x", 3]
markers = "+.x3ov^<>p"
"""
plt.figure(1)
plt.suptitle(self.model.modelName + " - TSNE")
if vectorType in ["ent"]:
self.runTSNE(entIndices, True)
else:
self.runTSNE(relIndices, False)
plt.figure(2)
plt.suptitle(self.model.modelName + " - PCA")
if vectorType in ["ent"]:
self.runPCA(entIndices, True)
else:
self.runPCA(relIndices, False)
"""
if vectorType in ["ent"]:
gp, lp = self.getInnerProducts(entIndices, sampleMean=sampleMean, normalized=isnormalized)
else:
gp, lp = self.getInnerProducts(relIndices, ent=False, normalized=isnormalized)
nBuckets = len(gp)
params = os.path.split(self.modelfile)[-1].split(".")[:-1]
products = " ".join(["%.4f" % lpp for lpp in lp])
outstr = "%s %d %d %s" % (params[1], int(params[3][1:]), int(params[2][1:]), products)
print outstr
plt.figure(3)
message = ["Dot Product with", "Global Mean"]
if isnormalized:
message[0] = "Normalized "+ message[0]
if sampleMean:
message[1] = "Sample Mean"
plt.title(self.model.modelName)
#plt.title(self.model.modelName + " - %s"%(" ".join(message)), loc='center')
#plt.suptitle(self.model.modelName + " - Dot Product with Global Mean")
plt.xlim(-1.0,1.0)
if "trans" in self.model.modelName.lower():
#maxy = 5.0 #entities
maxy = 3.0
else:
#maxy = 16.0 #entities
maxy = 8.0 #relations
plt.ylim(0, maxy)
plt.yticks(np.arange(maxy))
figs = []
for i, gpi in enumerate(gp):
#plt.subplot(nBuckets, 1, i+1)
density = scistats.gaussian_kde(gpi)
#x,y, _ = plt.hist(gpi, nSamples)
#plt.plot(y, density(y), c='r')
x,y = np.histogram(gpi, nSamples/2)
figs.append(plt.plot(y, density(y), c=colors[i], label=legendLabels[i], marker=markers[i]))
#plt.legend(figs, legendLabels, loc='upper right')
#plt.legend(loc='upper left')
#plt.legend(figs, legendLabels, loc='upper right')
"""
plt.figure(4)
plt.suptitle(self.model.modelName + " - Dot Product with Local Means")
for i in range(nBuckets):
for j in range(nBuckets):
plt.subplot(nBuckets, nBuckets, nBuckets*i + j + 1)
plt.xlim(-1,1)
plt.hist(lp[i][j])
"""
if vectorType in ['rel']:
outputfile += ".rel"
fig = plt.gcf()
fig.set_size_inches(16,10)
plt.savefig(outputfile+".png", dpi=72)
pickle.dump({"model":params[1], "dim":int(params[3][1:]), "neg":int(params[2][1:]), "dots":products},open(outputfile+".p", "wb"))
if showplot:
print outputfile
plt.show()
def runTSNE(self, indices, ent=True):
if ent:
vectors = self.model.E
else:
vectors = self.model.R
nComponents = 2
dim = vectors.shape[1]
colors = ['r','g','b','c']
allIndices = []
for idxs in indices:
allIndices.extend(idxs)
#temp = tsne(vectors[allIndices,:], 2, dim, 20.0)
temp = TSNE(n_components=2).fit_transform(vectors[allIndices,:])
for iteration, idxs in enumerate(indices):
nSamples = len(idxs)
plt.scatter(temp[iteration*nSamples:(iteration+1)*nSamples,0], temp[iteration*nSamples:(iteration+1)*nSamples,1], c=colors[iteration], marker="o")
#plt.show()
def getInnerProducts(self, indices, sampleMean=False, ent=True, normalized=False):
if ent:
vectors = self.model.E
mean = self.meanE
else:
vectors = self.model.R
mean = self.meanR
localProducts = []
globalProducts = []
meanDotProducts = []
if sampleMean:
means = [vectors[index, :].mean(axis=0) for index in indices]
mean = np.mean(means, axis=0)
if normalized:
vectors = normalize(vectors)
mean = mean/np.linalg.norm(mean)
for index in indices:
x = np.dot(vectors[index,:], mean)
globalProducts.append(x)
meanDotProducts.append(x.mean())
meanDotProducts.append(np.mean(meanDotProducts))
"""
for index1 in indices:
curVectors = vectors[index1,:]
curMean = curVectors.mean(axis=0)
curMean = curMean/np.linalg.norm(curMean)
curProducts = []
for index2 in indices:
curProducts.append(np.dot(vectors[index2,:], curMean))
localProducts.append(curProducts)
"""
return globalProducts, meanDotProducts
#return globalProducts, localProducts
def getLengths(self, indices, ent=True):
if ent:
vectors = self.model.E
else:
vectors = self.model.R
vectorLengths = []
meanVectorLengths = []
for index in indices:
x = np.linalg.norm(vectors[index,:], axis=1, ord=2)
vectorLengths.append(x)
meanVectorLengths.append(x.mean())
meanVectorLengths.append(np.mean(meanVectorLengths))
return vectorLengths, meanVectorLengths
def runPCA(self, entIndices, ent=True):
nComponents = 2
colors = ['r','g','b','c']
pca = PCA(n_components = nComponents)
if ent:
vectors = self.model.E
else:
vectors = self.model.R
for iteration, idxs in enumerate(entIndices):
nSamples = len(idxs)
temp = pca.fit_transform(vectors[idxs,:])
plt.scatter(temp[:,0], temp[:,1], c=colors[iteration], marker="v")
iteration += 1
#plt.show()
def getParser():
parser = argparse.ArgumentParser(description="parser for arguments", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-d", "--datafile", type=str, help="pickled triples file", required=True)
parser.add_argument("-m", "--modelfile", type=str, help="pickled model file", required=True)
parser.add_argument("--smean", dest='smean', help="Flag for using sample mean[default]", action='store_true')
parser.add_argument("--no-smean", dest='smean', help="Flag for using global mean", action='store_false')
parser.set_defaults(smean=True)
parser.add_argument("--normalized", dest="normalized", help="Flag for using normalized vectors for dot products[default]", action='store_true')
parser.add_argument("--no-normalized", dest="normalized", help="Flag for using unnormalized vectors for dot products", action='store_false')
parser.set_defaults(normalized=True)
parser.add_argument("--show", dest="show", help="Flag for showing plot[False]", action='store_true')
parser.set_defaults(show=False)
parser.add_argument("--pr", dest="pr", help="Flag for using pagerank plot[False]", action='store_true')
parser.set_defaults(pr=False)
parser.add_argument("-t", "--type", type=str, help="[ent]/rel", default="ent")
parser.add_argument("-o", "--opdir", type=str, help="output directory to save the figure", required=True)
return parser
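# Example invocation (file names are illustrative; the model file name is split
# on "." by run(), so it is expected to look like <data>.<model>.n<neg>.d<dim>.p):
#   python analysis.py -d triples.pkl -m fb15k.transe.n1.d100.p -o plots/ -t ent --show
# -d/-m/-o are required; --type selects ent/rel vectors, --smean/--normalized
# toggle sample-mean and normalized dot products, --pr ranks entities by pagerank.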
def main():
parser = getParser()
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(1)
if not os.path.exists(args.modelfile):
print "file doesn't exists : ", args.modelfile
sys.exit(1)
a = Analyser(args.datafile, args.modelfile, args.pr)
#a.runPCA()
a.run(args.type, args.smean, args.normalized, args.opdir, args.show)
#a.relPerf(args.opdir)
#a.entPerf(args.opdir)
if __name__ == "__main__":
main()
| 40.626741 | 188 | 0.571957 |
da992fb626ed291d1ec01872a703f73365cb16f3 | 25,110 | py | Python | 8 HOUSE PRICES/xgboost-lasso.py | MLVPRASAD/KaggleProjects | 379e062cf58d83ff57a456552bb956df68381fdd | [
"MIT"
] | 2 | 2020-01-25T08:31:14.000Z | 2022-03-23T18:24:03.000Z | 8 HOUSE PRICES/xgboost-lasso.py | MlvPrasadOfficial/KaggleNoteboooks_of_Projects | 379e062cf58d83ff57a456552bb956df68381fdd | [
"MIT"
] | null | null | null | 8 HOUSE PRICES/xgboost-lasso.py | MlvPrasadOfficial/KaggleNoteboooks_of_Projects | 379e062cf58d83ff57a456552bb956df68381fdd | [
"MIT"
] | null | null | null | # This script creates a ton of features, then trains an XGBoost regressor
# and a Lasso regressor, and combines their predictions.
#
# It borrows ideas from lots of other people's scripts, including:
# https://www.kaggle.com/klyusba/house-prices-advanced-regression-techniques/lasso-model-for-regression-problem/notebook
# https://www.kaggle.com/juliencs/house-prices-advanced-regression-techniques/a-study-on-regression-applied-to-the-ames-dataset/
# https://www.kaggle.com/apapiu/house-prices-advanced-regression-techniques/regularized-linear-models
# but I probably forgot to mention a few. ;-)
import numpy as np
import pandas as pd
# The error metric: RMSE on the log of the sale prices.
from sklearn.metrics import mean_squared_error
def rmse(y_true, y_pred):
return np.sqrt(mean_squared_error(y_true, y_pred))
# Load the data.
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
# There are a few houses with more than 4000 sq ft living area that are
# outliers, so we drop them from the training data. (There is also one in
# the test set but we obviously can't drop that one.)
train_df.drop(train_df[train_df["GrLivArea"] > 4000].index, inplace=True)
# The test example with ID 666 has GarageArea, GarageCars, and GarageType
# but none of the other fields, so use the mode and median to fill them in.
test_df.loc[666, "GarageQual"] = "TA"
test_df.loc[666, "GarageCond"] = "TA"
test_df.loc[666, "GarageFinish"] = "Unf"
test_df.loc[666, "GarageYrBlt"] = "1980"
# The test example 1116 only has GarageType but no other information. We'll
# assume it does not have a garage.
test_df.loc[1116, "GarageType"] = np.nan
# For imputing missing values: fill in missing LotFrontage values by the median
# LotFrontage of the neighborhood.
lot_frontage_by_neighborhood = train_df["LotFrontage"].groupby(train_df["Neighborhood"])
# Used to convert categorical features into ordinal numbers.
# (There's probably an easier way to do this, but it works.)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
def factorize(df, factor_df, column, fill_na=None):
factor_df[column] = df[column]
if fill_na is not None:
factor_df[column].fillna(fill_na, inplace=True)
le.fit(factor_df[column].unique())
factor_df[column] = le.transform(factor_df[column])
return factor_df
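# A pandas-only alternative to the LabelEncoder round-trip above (sketch kept
# for comparison; not used by the rest of the script):
def factorize_with_pandas(df, factor_df, column, fill_na=None):
    factor_df[column] = df[column]
    if fill_na is not None:
        factor_df[column].fillna(fill_na, inplace=True)
    # Category codes give the same kind of ordinal encoding as LabelEncoder.
    factor_df[column] = factor_df[column].astype("category").cat.codes
    return factor_df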
# Combine all the (numerical) features into one big DataFrame. We don't add
# the one-hot encoded variables here yet, that happens later on.
def munge(df):
all_df = pd.DataFrame(index = df.index)
all_df["LotFrontage"] = df["LotFrontage"]
for key, group in lot_frontage_by_neighborhood:
idx = (df["Neighborhood"] == key) & (df["LotFrontage"].isnull())
all_df.loc[idx, "LotFrontage"] = group.median()
all_df["LotArea"] = df["LotArea"]
all_df["MasVnrArea"] = df["MasVnrArea"]
all_df["MasVnrArea"].fillna(0, inplace=True)
all_df["BsmtFinSF1"] = df["BsmtFinSF1"]
all_df["BsmtFinSF1"].fillna(0, inplace=True)
all_df["BsmtFinSF2"] = df["BsmtFinSF2"]
all_df["BsmtFinSF2"].fillna(0, inplace=True)
all_df["BsmtUnfSF"] = df["BsmtUnfSF"]
all_df["BsmtUnfSF"].fillna(0, inplace=True)
all_df["TotalBsmtSF"] = df["TotalBsmtSF"]
all_df["TotalBsmtSF"].fillna(0, inplace=True)
all_df["1stFlrSF"] = df["1stFlrSF"]
all_df["2ndFlrSF"] = df["2ndFlrSF"]
all_df["GrLivArea"] = df["GrLivArea"]
all_df["GarageArea"] = df["GarageArea"]
all_df["GarageArea"].fillna(0, inplace=True)
all_df["WoodDeckSF"] = df["WoodDeckSF"]
all_df["OpenPorchSF"] = df["OpenPorchSF"]
all_df["EnclosedPorch"] = df["EnclosedPorch"]
all_df["3SsnPorch"] = df["3SsnPorch"]
all_df["ScreenPorch"] = df["ScreenPorch"]
all_df["BsmtFullBath"] = df["BsmtFullBath"]
all_df["BsmtFullBath"].fillna(0, inplace=True)
all_df["BsmtHalfBath"] = df["BsmtHalfBath"]
all_df["BsmtHalfBath"].fillna(0, inplace=True)
all_df["FullBath"] = df["FullBath"]
all_df["HalfBath"] = df["HalfBath"]
all_df["BedroomAbvGr"] = df["BedroomAbvGr"]
all_df["KitchenAbvGr"] = df["KitchenAbvGr"]
all_df["TotRmsAbvGrd"] = df["TotRmsAbvGrd"]
all_df["Fireplaces"] = df["Fireplaces"]
all_df["GarageCars"] = df["GarageCars"]
all_df["GarageCars"].fillna(0, inplace=True)
all_df["CentralAir"] = (df["CentralAir"] == "Y") * 1.0
all_df["OverallQual"] = df["OverallQual"]
all_df["OverallCond"] = df["OverallCond"]
# Quality measurements are stored as text but we can convert them to
# numbers where a higher number means higher quality.
qual_dict = {None: 0, "Po": 1, "Fa": 2, "TA": 3, "Gd": 4, "Ex": 5}
all_df["ExterQual"] = df["ExterQual"].map(qual_dict).astype(int)
all_df["ExterCond"] = df["ExterCond"].map(qual_dict).astype(int)
all_df["BsmtQual"] = df["BsmtQual"].map(qual_dict).astype(int)
all_df["BsmtCond"] = df["BsmtCond"].map(qual_dict).astype(int)
all_df["HeatingQC"] = df["HeatingQC"].map(qual_dict).astype(int)
all_df["KitchenQual"] = df["KitchenQual"].map(qual_dict).astype(int)
all_df["FireplaceQu"] = df["FireplaceQu"].map(qual_dict).astype(int)
all_df["GarageQual"] = df["GarageQual"].map(qual_dict).astype(int)
all_df["GarageCond"] = df["GarageCond"].map(qual_dict).astype(int)
all_df["BsmtExposure"] = df["BsmtExposure"].map(
{None: 0, "No": 1, "Mn": 2, "Av": 3, "Gd": 4}).astype(int)
bsmt_fin_dict = {None: 0, "Unf": 1, "LwQ": 2, "Rec": 3, "BLQ": 4, "ALQ": 5, "GLQ": 6}
all_df["BsmtFinType1"] = df["BsmtFinType1"].map(bsmt_fin_dict).astype(int)
all_df["BsmtFinType2"] = df["BsmtFinType2"].map(bsmt_fin_dict).astype(int)
all_df["Functional"] = df["Functional"].map(
{None: 0, "Sal": 1, "Sev": 2, "Maj2": 3, "Maj1": 4,
"Mod": 5, "Min2": 6, "Min1": 7, "Typ": 8}).astype(int)
all_df["GarageFinish"] = df["GarageFinish"].map(
{None: 0, "Unf": 1, "RFn": 2, "Fin": 3}).astype(int)
all_df["Fence"] = df["Fence"].map(
{None: 0, "MnWw": 1, "GdWo": 2, "MnPrv": 3, "GdPrv": 4}).astype(int)
all_df["YearBuilt"] = df["YearBuilt"]
all_df["YearRemodAdd"] = df["YearRemodAdd"]
all_df["GarageYrBlt"] = df["GarageYrBlt"]
all_df["GarageYrBlt"].fillna(0.0, inplace=True)
all_df["MoSold"] = df["MoSold"]
all_df["YrSold"] = df["YrSold"]
all_df["LowQualFinSF"] = df["LowQualFinSF"]
all_df["MiscVal"] = df["MiscVal"]
all_df["PoolQC"] = df["PoolQC"].map(qual_dict).astype(int)
all_df["PoolArea"] = df["PoolArea"]
all_df["PoolArea"].fillna(0, inplace=True)
# Add categorical features as numbers too. It seems to help a bit.
all_df = factorize(df, all_df, "MSSubClass")
all_df = factorize(df, all_df, "MSZoning", "RL")
all_df = factorize(df, all_df, "LotConfig")
all_df = factorize(df, all_df, "Neighborhood")
all_df = factorize(df, all_df, "Condition1")
all_df = factorize(df, all_df, "BldgType")
all_df = factorize(df, all_df, "HouseStyle")
all_df = factorize(df, all_df, "RoofStyle")
all_df = factorize(df, all_df, "Exterior1st", "Other")
all_df = factorize(df, all_df, "Exterior2nd", "Other")
all_df = factorize(df, all_df, "MasVnrType", "None")
all_df = factorize(df, all_df, "Foundation")
all_df = factorize(df, all_df, "SaleType", "Oth")
all_df = factorize(df, all_df, "SaleCondition")
# IR2 and IR3 don't appear that often, so just make a distinction
# between regular and irregular.
all_df["IsRegularLotShape"] = (df["LotShape"] == "Reg") * 1
# Most properties are level; bin the other possibilities together
# as "not level".
all_df["IsLandLevel"] = (df["LandContour"] == "Lvl") * 1
# Most land slopes are gentle; treat the others as "not gentle".
all_df["IsLandSlopeGentle"] = (df["LandSlope"] == "Gtl") * 1
# Most properties use standard circuit breakers.
all_df["IsElectricalSBrkr"] = (df["Electrical"] == "SBrkr") * 1
# About 2/3rd have an attached garage.
all_df["IsGarageDetached"] = (df["GarageType"] == "Detchd") * 1
# Most have a paved drive. Treat dirt/gravel and partial pavement
# as "not paved".
all_df["IsPavedDrive"] = (df["PavedDrive"] == "Y") * 1
# The only interesting "misc. feature" is the presence of a shed.
all_df["HasShed"] = (df["MiscFeature"] == "Shed") * 1.
# If YearRemodAdd != YearBuilt, then a remodeling took place at some point.
all_df["Remodeled"] = (all_df["YearRemodAdd"] != all_df["YearBuilt"]) * 1
# Did a remodeling happen in the year the house was sold?
all_df["RecentRemodel"] = (all_df["YearRemodAdd"] == all_df["YrSold"]) * 1
# Was this house sold in the year it was built?
all_df["VeryNewHouse"] = (all_df["YearBuilt"] == all_df["YrSold"]) * 1
all_df["Has2ndFloor"] = (all_df["2ndFlrSF"] == 0) * 1
all_df["HasMasVnr"] = (all_df["MasVnrArea"] == 0) * 1
all_df["HasWoodDeck"] = (all_df["WoodDeckSF"] == 0) * 1
all_df["HasOpenPorch"] = (all_df["OpenPorchSF"] == 0) * 1
all_df["HasEnclosedPorch"] = (all_df["EnclosedPorch"] == 0) * 1
all_df["Has3SsnPorch"] = (all_df["3SsnPorch"] == 0) * 1
all_df["HasScreenPorch"] = (all_df["ScreenPorch"] == 0) * 1
# These features actually lower the score a little.
# all_df["HasBasement"] = df["BsmtQual"].isnull() * 1
# all_df["HasGarage"] = df["GarageQual"].isnull() * 1
# all_df["HasFireplace"] = df["FireplaceQu"].isnull() * 1
# all_df["HasFence"] = df["Fence"].isnull() * 1
# Months with the largest number of deals may be significant.
all_df["HighSeason"] = df["MoSold"].replace(
{1: 0, 2: 0, 3: 0, 4: 1, 5: 1, 6: 1, 7: 1, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0})
all_df["NewerDwelling"] = df["MSSubClass"].replace(
{20: 1, 30: 0, 40: 0, 45: 0,50: 0, 60: 1, 70: 0, 75: 0, 80: 0, 85: 0,
90: 0, 120: 1, 150: 0, 160: 0, 180: 0, 190: 0})
all_df.loc[df.Neighborhood == 'NridgHt', "Neighborhood_Good"] = 1
all_df.loc[df.Neighborhood == 'Crawfor', "Neighborhood_Good"] = 1
all_df.loc[df.Neighborhood == 'StoneBr', "Neighborhood_Good"] = 1
all_df.loc[df.Neighborhood == 'Somerst', "Neighborhood_Good"] = 1
all_df.loc[df.Neighborhood == 'NoRidge', "Neighborhood_Good"] = 1
all_df["Neighborhood_Good"].fillna(0, inplace=True)
all_df["SaleCondition_PriceDown"] = df.SaleCondition.replace(
{'Abnorml': 1, 'Alloca': 1, 'AdjLand': 1, 'Family': 1, 'Normal': 0, 'Partial': 0})
# House completed before sale or not
all_df["BoughtOffPlan"] = df.SaleCondition.replace(
{"Abnorml" : 0, "Alloca" : 0, "AdjLand" : 0, "Family" : 0, "Normal" : 0, "Partial" : 1})
all_df["BadHeating"] = df.HeatingQC.replace(
{'Ex': 0, 'Gd': 0, 'TA': 0, 'Fa': 1, 'Po': 1})
area_cols = ['LotFrontage', 'LotArea', 'MasVnrArea', 'BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF',
'TotalBsmtSF', '1stFlrSF', '2ndFlrSF', 'GrLivArea', 'GarageArea', 'WoodDeckSF',
'OpenPorchSF', 'EnclosedPorch', '3SsnPorch', 'ScreenPorch', 'LowQualFinSF', 'PoolArea' ]
all_df["TotalArea"] = all_df[area_cols].sum(axis=1)
all_df["TotalArea1st2nd"] = all_df["1stFlrSF"] + all_df["2ndFlrSF"]
all_df["Age"] = 2010 - all_df["YearBuilt"]
all_df["TimeSinceSold"] = 2010 - all_df["YrSold"]
all_df["SeasonSold"] = all_df["MoSold"].map({12:0, 1:0, 2:0, 3:1, 4:1, 5:1,
6:2, 7:2, 8:2, 9:3, 10:3, 11:3}).astype(int)
all_df["YearsSinceRemodel"] = all_df["YrSold"] - all_df["YearRemodAdd"]
# Simplifications of existing features into bad/average/good.
all_df["SimplOverallQual"] = all_df.OverallQual.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2, 7 : 3, 8 : 3, 9 : 3, 10 : 3})
all_df["SimplOverallCond"] = all_df.OverallCond.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2, 7 : 3, 8 : 3, 9 : 3, 10 : 3})
all_df["SimplPoolQC"] = all_df.PoolQC.replace(
{1 : 1, 2 : 1, 3 : 2, 4 : 2})
all_df["SimplGarageCond"] = all_df.GarageCond.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplGarageQual"] = all_df.GarageQual.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplFireplaceQu"] = all_df.FireplaceQu.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplFireplaceQu"] = all_df.FireplaceQu.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplFunctional"] = all_df.Functional.replace(
{1 : 1, 2 : 1, 3 : 2, 4 : 2, 5 : 3, 6 : 3, 7 : 3, 8 : 4})
all_df["SimplKitchenQual"] = all_df.KitchenQual.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplHeatingQC"] = all_df.HeatingQC.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplBsmtFinType1"] = all_df.BsmtFinType1.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2})
all_df["SimplBsmtFinType2"] = all_df.BsmtFinType2.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2, 6 : 2})
all_df["SimplBsmtCond"] = all_df.BsmtCond.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplBsmtQual"] = all_df.BsmtQual.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplExterCond"] = all_df.ExterCond.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
all_df["SimplExterQual"] = all_df.ExterQual.replace(
{1 : 1, 2 : 1, 3 : 1, 4 : 2, 5 : 2})
# Bin by neighborhood (a little arbitrarily). Values were computed by:
# train_df["SalePrice"].groupby(train_df["Neighborhood"]).median().sort_values()
neighborhood_map = {
"MeadowV" : 0, # 88000
"IDOTRR" : 1, # 103000
"BrDale" : 1, # 106000
"OldTown" : 1, # 119000
"Edwards" : 1, # 119500
"BrkSide" : 1, # 124300
"Sawyer" : 1, # 135000
"Blueste" : 1, # 137500
"SWISU" : 2, # 139500
"NAmes" : 2, # 140000
"NPkVill" : 2, # 146000
"Mitchel" : 2, # 153500
"SawyerW" : 2, # 179900
"Gilbert" : 2, # 181000
"NWAmes" : 2, # 182900
"Blmngtn" : 2, # 191000
"CollgCr" : 2, # 197200
"ClearCr" : 3, # 200250
"Crawfor" : 3, # 200624
"Veenker" : 3, # 218000
"Somerst" : 3, # 225500
"Timber" : 3, # 228475
"StoneBr" : 4, # 278000
"NoRidge" : 4, # 290000
"NridgHt" : 4, # 315000
}
all_df["NeighborhoodBin"] = df["Neighborhood"].map(neighborhood_map)
return all_df
train_df_munged = munge(train_df)
test_df_munged = munge(test_df)
print(train_df_munged.shape)
print(test_df_munged.shape)
# Copy NeighborhoodBin into a temporary DataFrame because we want to use the
# unscaled version later on (to one-hot encode it).
neighborhood_bin_train = pd.DataFrame(index = train_df.index)
neighborhood_bin_train["NeighborhoodBin"] = train_df_munged["NeighborhoodBin"]
neighborhood_bin_test = pd.DataFrame(index = test_df.index)
neighborhood_bin_test["NeighborhoodBin"] = test_df_munged["NeighborhoodBin"]
################################################################################
numeric_features = train_df_munged.dtypes[train_df_munged.dtypes != "object"].index
# Transform the skewed numeric features by taking log(feature + 1).
# This will make the features more normal.
from scipy.stats import skew
skewed = train_df_munged[numeric_features].apply(lambda x: skew(x.dropna().astype(float)))
skewed = skewed[skewed > 0.75]
skewed = skewed.index
train_df_munged[skewed] = np.log1p(train_df_munged[skewed])
test_df_munged[skewed] = np.log1p(test_df_munged[skewed])
# Additional processing: scale the data.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(train_df_munged[numeric_features])
scaled = scaler.transform(train_df_munged[numeric_features])
for i, col in enumerate(numeric_features):
train_df_munged[col] = scaled[:, i]
scaled = scaler.transform(test_df_munged[numeric_features])
for i, col in enumerate(numeric_features):
test_df_munged[col] = scaled[:, i]
################################################################################
# Convert categorical features using one-hot encoding.
def onehot(onehot_df, df, column_name, fill_na, drop_name):
onehot_df[column_name] = df[column_name]
if fill_na is not None:
onehot_df[column_name].fillna(fill_na, inplace=True)
dummies = pd.get_dummies(onehot_df[column_name], prefix="_" + column_name)
# Dropping one of the columns actually made the results slightly worse.
# if drop_name is not None:
# dummies.drop(["_" + column_name + "_" + drop_name], axis=1, inplace=True)
onehot_df = onehot_df.join(dummies)
onehot_df = onehot_df.drop([column_name], axis=1)
return onehot_df
def munge_onehot(df):
onehot_df = pd.DataFrame(index = df.index)
onehot_df = onehot(onehot_df, df, "MSSubClass", None, "40")
onehot_df = onehot(onehot_df, df, "MSZoning", "RL", "RH")
onehot_df = onehot(onehot_df, df, "LotConfig", None, "FR3")
onehot_df = onehot(onehot_df, df, "Neighborhood", None, "OldTown")
onehot_df = onehot(onehot_df, df, "Condition1", None, "RRNe")
onehot_df = onehot(onehot_df, df, "BldgType", None, "2fmCon")
onehot_df = onehot(onehot_df, df, "HouseStyle", None, "1.5Unf")
onehot_df = onehot(onehot_df, df, "RoofStyle", None, "Shed")
onehot_df = onehot(onehot_df, df, "Exterior1st", "VinylSd", "CBlock")
onehot_df = onehot(onehot_df, df, "Exterior2nd", "VinylSd", "CBlock")
onehot_df = onehot(onehot_df, df, "Foundation", None, "Wood")
onehot_df = onehot(onehot_df, df, "SaleType", "WD", "Oth")
onehot_df = onehot(onehot_df, df, "SaleCondition", "Normal", "AdjLand")
# Fill in missing MasVnrType for rows that do have a MasVnrArea.
temp_df = df[["MasVnrType", "MasVnrArea"]].copy()
idx = (df["MasVnrArea"] != 0) & ((df["MasVnrType"] == "None") | (df["MasVnrType"].isnull()))
temp_df.loc[idx, "MasVnrType"] = "BrkFace"
onehot_df = onehot(onehot_df, temp_df, "MasVnrType", "None", "BrkCmn")
# Also add the booleans from calc_df as dummy variables.
onehot_df = onehot(onehot_df, df, "LotShape", None, "IR3")
onehot_df = onehot(onehot_df, df, "LandContour", None, "Low")
onehot_df = onehot(onehot_df, df, "LandSlope", None, "Sev")
onehot_df = onehot(onehot_df, df, "Electrical", "SBrkr", "FuseP")
onehot_df = onehot(onehot_df, df, "GarageType", "None", "CarPort")
onehot_df = onehot(onehot_df, df, "PavedDrive", None, "P")
onehot_df = onehot(onehot_df, df, "MiscFeature", "None", "Othr")
# Features we can probably ignore (but want to include anyway to see
# if they make any positive difference).
# Definitely ignoring Utilities: all records are "AllPub", except for
# one "NoSeWa" in the train set and 2 NA in the test set.
onehot_df = onehot(onehot_df, df, "Street", None, "Grvl")
onehot_df = onehot(onehot_df, df, "Alley", "None", "Grvl")
onehot_df = onehot(onehot_df, df, "Condition2", None, "PosA")
onehot_df = onehot(onehot_df, df, "RoofMatl", None, "WdShake")
onehot_df = onehot(onehot_df, df, "Heating", None, "Wall")
# I have these as numerical variables too.
onehot_df = onehot(onehot_df, df, "ExterQual", "None", "Ex")
onehot_df = onehot(onehot_df, df, "ExterCond", "None", "Ex")
onehot_df = onehot(onehot_df, df, "BsmtQual", "None", "Ex")
onehot_df = onehot(onehot_df, df, "BsmtCond", "None", "Ex")
onehot_df = onehot(onehot_df, df, "HeatingQC", "None", "Ex")
onehot_df = onehot(onehot_df, df, "KitchenQual", "TA", "Ex")
onehot_df = onehot(onehot_df, df, "FireplaceQu", "None", "Ex")
onehot_df = onehot(onehot_df, df, "GarageQual", "None", "Ex")
onehot_df = onehot(onehot_df, df, "GarageCond", "None", "Ex")
onehot_df = onehot(onehot_df, df, "PoolQC", "None", "Ex")
onehot_df = onehot(onehot_df, df, "BsmtExposure", "None", "Gd")
onehot_df = onehot(onehot_df, df, "BsmtFinType1", "None", "GLQ")
onehot_df = onehot(onehot_df, df, "BsmtFinType2", "None", "GLQ")
onehot_df = onehot(onehot_df, df, "Functional", "Typ", "Typ")
onehot_df = onehot(onehot_df, df, "GarageFinish", "None", "Fin")
onehot_df = onehot(onehot_df, df, "Fence", "None", "MnPrv")
onehot_df = onehot(onehot_df, df, "MoSold", None, None)
# Divide up the years between 1871 and 2010 in slices of 20 years.
year_map = pd.concat(pd.Series("YearBin" + str(i+1), index=range(1871+i*20,1891+i*20)) for i in range(0, 7))
yearbin_df = pd.DataFrame(index = df.index)
yearbin_df["GarageYrBltBin"] = df.GarageYrBlt.map(year_map)
yearbin_df["GarageYrBltBin"].fillna("NoGarage", inplace=True)
yearbin_df["YearBuiltBin"] = df.YearBuilt.map(year_map)
yearbin_df["YearRemodAddBin"] = df.YearRemodAdd.map(year_map)
onehot_df = onehot(onehot_df, yearbin_df, "GarageYrBltBin", None, None)
onehot_df = onehot(onehot_df, yearbin_df, "YearBuiltBin", None, None)
onehot_df = onehot(onehot_df, yearbin_df, "YearRemodAddBin", None, None)
return onehot_df
# Add the one-hot encoded categorical features.
onehot_df = munge_onehot(train_df)
onehot_df = onehot(onehot_df, neighborhood_bin_train, "NeighborhoodBin", None, None)
train_df_munged = train_df_munged.join(onehot_df)
# These onehot columns are missing in the test data, so drop them from the
# training data or we might overfit on them.
drop_cols = [
"_Exterior1st_ImStucc", "_Exterior1st_Stone",
"_Exterior2nd_Other","_HouseStyle_2.5Fin",
"_RoofMatl_Membran", "_RoofMatl_Metal", "_RoofMatl_Roll",
"_Condition2_RRAe", "_Condition2_RRAn", "_Condition2_RRNn",
"_Heating_Floor", "_Heating_OthW",
"_Electrical_Mix",
"_MiscFeature_TenC",
"_GarageQual_Ex", "_PoolQC_Fa"
]
train_df_munged.drop(drop_cols, axis=1, inplace=True)
onehot_df = munge_onehot(test_df)
onehot_df = onehot(onehot_df, neighborhood_bin_test, "NeighborhoodBin", None, None)
test_df_munged = test_df_munged.join(onehot_df)
# This column is missing in the training data. There is only one example with
# this value in the test set. So just drop it.
test_df_munged.drop(["_MSSubClass_150"], axis=1, inplace=True)
# Drop these columns. They are either not very helpful or they cause overfitting.
drop_cols = [
"_Condition2_PosN", # only two are not zero
"_MSZoning_C (all)",
"_MSSubClass_160",
]
train_df_munged.drop(drop_cols, axis=1, inplace=True)
test_df_munged.drop(drop_cols, axis=1, inplace=True)
################################################################################
# We take the log here because the error metric is between the log of the
# SalePrice and the log of the predicted price. That does mean we need to
# exp() the prediction to get an actual sale price.
label_df = pd.DataFrame(index = train_df_munged.index, columns=["SalePrice"])
label_df["SalePrice"] = np.log(train_df["SalePrice"])
print("Training set size:", train_df_munged.shape)
print("Test set size:", test_df_munged.shape)
################################################################################
# XGBoost -- I did some "manual" cross-validation here but should really find
# these hyperparameters using CV. ;-)
import xgboost as xgb
regr = xgb.XGBRegressor(
colsample_bytree=0.2,
gamma=0.0,
learning_rate=0.01,
max_depth=4,
min_child_weight=1.5,
n_estimators=7200,
reg_alpha=0.9,
reg_lambda=0.6,
subsample=0.2,
seed=42,
silent=1)
regr.fit(train_df_munged, label_df)
# Run prediction on training set to get a rough idea of how well it does.
y_pred = regr.predict(train_df_munged)
y_test = label_df
print("XGBoost score on training set: ", rmse(y_test, y_pred))
# Run prediction on the Kaggle test set.
y_pred_xgb = regr.predict(test_df_munged)
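# Optional sketch of finding these hyperparameters with an actual grid search
# instead of manual tuning. Disabled by default so the script's runtime and
# output stay unchanged; the grid below is illustrative, not the one used to
# pick the values above.
RUN_XGB_GRID_SEARCH = False
if RUN_XGB_GRID_SEARCH:
    from sklearn.model_selection import GridSearchCV
    xgb_grid = {
        "max_depth": [3, 4, 6],
        "learning_rate": [0.01, 0.05],
        "subsample": [0.2, 0.5, 0.8],
        "colsample_bytree": [0.2, 0.5],
    }
    search = GridSearchCV(
        xgb.XGBRegressor(n_estimators=1000, seed=42),
        xgb_grid, scoring="neg_mean_squared_error", cv=5)
    search.fit(train_df_munged, label_df.values.ravel())
    print("Best XGBoost parameters found by CV:", search.best_params_)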
################################################################################
from sklearn.linear_model import Lasso
# I found this best alpha through cross-validation.
best_alpha = 0.00099
regr = Lasso(alpha=best_alpha, max_iter=50000)
regr.fit(train_df_munged, label_df)
# Run prediction on training set to get a rough idea of how well it does.
y_pred = regr.predict(train_df_munged)
y_test = label_df
print("Lasso score on training set: ", rmse(y_test, y_pred))
# Run prediction on the Kaggle test set.
y_pred_lasso = regr.predict(test_df_munged)
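# Optional sketch: LassoCV can pick alpha by cross-validation automatically.
# Disabled by default; the alpha grid is illustrative, not the search that
# produced best_alpha above.
RUN_LASSO_CV = False
if RUN_LASSO_CV:
    from sklearn.linear_model import LassoCV
    lasso_cv = LassoCV(alphas=np.logspace(-5, -1, 30), max_iter=50000, cv=5)
    lasso_cv.fit(train_df_munged, label_df.values.ravel())
    print("Alpha chosen by LassoCV:", lasso_cv.alpha_)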
################################################################################
# Blend the results of the two regressors and save the prediction to a CSV file.
y_pred = (y_pred_xgb + y_pred_lasso) / 2
y_pred = np.exp(y_pred)
pred_df = pd.DataFrame(y_pred, index=test_df["Id"], columns=["SalePrice"])
pred_df.to_csv('output.csv', header=True, index_label='Id')
| 43.59375 | 128 | 0.635205 |
a33edce162858b9b84831f71fc35b312becb14f5 | 2,284 | py | Python | violation/admin.py | adepeter/django-violations | 92f6052a11594a66a7a963abb04cb17e00412bcc | [
"MIT"
] | 1 | 2020-05-24T20:46:20.000Z | 2020-05-24T20:46:20.000Z | violation/admin.py | adepeter/django-violations | 92f6052a11594a66a7a963abb04cb17e00412bcc | [
"MIT"
] | null | null | null | violation/admin.py | adepeter/django-violations | 92f6052a11594a66a7a963abb04cb17e00412bcc | [
"MIT"
] | null | null | null | from django.contrib import admin
from violation.models import Rule, Violation
class RuleViolationInline(admin.StackedInline):
model = Rule.violations.through
@admin.register(Rule)
class RuleAdmin(admin.ModelAdmin):
radio_fields = {'category': admin.HORIZONTAL}
list_display = ['name', 'category', 'short_rule']
list_filter = ['category']
search_fields = ['name', 'category', 'description']
ordering = ['name', 'category']
save_on_top = True
def short_rule(self, obj):
return obj.description[:20]
short_rule.short_description = 'Description'
@admin.register(Violation)
class ViolationAdmin(admin.ModelAdmin):
readonly_fields = ['violator', 'created', 'modified']
list_display_links = ['id', 'rules_violated']
radio_fields = {'status': admin.HORIZONTAL}
list_display = [
'id',
'reported_by',
'violator',
'item_of_violation_category',
'item_of_violation',
'object_id',
'rules_violated',
'date_reported',
'status',
'is_violated',
'last_modified'
]
ordering = [
'reported_by',
'violator',
'is_violated',
'status'
]
list_filter = [
'reported_by',
'violator',
'status',
'is_violated'
]
list_editable = ['is_violated', 'status']
date_hierarchy = 'created'
filter_horizontal = ['rules']
list_per_page = 20
def item_of_violation_category(self, obj):
return obj.content_type
item_of_violation_category.short_description = 'Item category'
def item_of_violation(self, obj):
return obj.content_object
item_of_violation.short_description = 'Item of violation'
def date_reported(self, obj):
return obj.created
date_reported.short_description = 'Date reported'
def last_modified(self, obj):
return obj.modified
    last_modified.short_description = 'Last action'
def rules_violated(self, obj):
rules = obj.rules.all()
return ('%s' % ', '.join([rule.name for rule in rules]))
    rules_violated.short_description = 'Violated rules'
def get_readonly_fields(self, request, obj=None):
if obj is not None:
return self.list_display
return []
| 25.377778 | 66 | 0.638354 |
f1e506906b5099a6485f9b906b2b1948ecda3e11 | 640 | py | Python | setup.py | addiebarron/django-workers | e51d0b9ddae5c8617a06abe4e81c4901486c0077 | [
"MIT"
] | 18 | 2018-08-10T19:37:36.000Z | 2021-08-06T07:20:37.000Z | setup.py | addiebarron/django-workers | e51d0b9ddae5c8617a06abe4e81c4901486c0077 | [
"MIT"
] | 12 | 2018-08-28T21:26:59.000Z | 2021-06-10T20:46:57.000Z | setup.py | addiebarron/django-workers | e51d0b9ddae5c8617a06abe4e81c4901486c0077 | [
"MIT"
] | 12 | 2019-03-12T19:34:02.000Z | 2021-12-15T23:03:08.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="django-workers",
version="0.1.3",
author="Gavin Vickery",
author_email="gavin@geekforbrains.com",
description="Simple background tasks for Django",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/geekforbrains/django-workers",
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| 29.090909 | 58 | 0.676563 |
2b5de380162d9a3851d7be30e3348f44a2b05d79 | 4,232 | py | Python | setup_requirements.py | tomzhang/aiida_core | 949810e9f3daff0f748c5c9aa1dde4f5222bb49b | [
"BSD-2-Clause"
] | 1 | 2019-04-29T12:39:31.000Z | 2019-04-29T12:39:31.000Z | setup_requirements.py | tomzhang/aiida_core | 949810e9f3daff0f748c5c9aa1dde4f5222bb49b | [
"BSD-2-Clause"
] | null | null | null | setup_requirements.py | tomzhang/aiida_core | 949810e9f3daff0f748c5c9aa1dde4f5222bb49b | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida_core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
install_requires = [
'reentry==1.2.0',
'python-dateutil==2.7.2',
'python-mimeparse==1.6.0',
'django==1.8.19',
'django-extensions==1.5.0',
'tzlocal==1.5.1',
'pytz==2018.4',
'PyYAML==3.12',
'six==1.11.0',
'anyjson==0.3.3',
'psutil==5.4.5',
'meld3==1.0.2',
'mock==2.0.0',
'numpy==1.14.3',
'SQLAlchemy==1.0.19', # Upgrade to SQLalchemy 1.1.5 does break tests, see #465
'SQLAlchemy-Utils==0.33.0',
'alembic==0.9.9',
'ujson==1.35',
'aldjemy==0.8.0',
'passlib==1.7.1',
'validate-email==1.3',
'click==6.7',
'click-completion==0.3.1',
'click-plugins==1.0.3',
'click-spinner==0.1.7',
'tabulate==0.8.2',
'ete3==3.1.1',
'uritools==2.1.0',
'psycopg2-binary==2.7.4',
'paramiko==2.4.1',
'ecdsa==0.13',
'pika==0.11.2',
    'ipython>=4.0,<6.0', # Version of ipython not enforced, because some still prefer version 4 rather than the latest
'plumpy==0.10.6',
'circus==0.14.0',
'tornado==4.5.3', # As of 2018/03/06 Tornado released v5.0 which breaks circus 0.14.0
'chainmap; python_version<"3.5"',
'pathlib2; python_version<"3.5"',
'singledispatch>=3.4.0.3; python_version<"3.5"',
'enum34==1.1.6; python_version<"3.5"',
]
extras_require = {
# Requirements for ssh transport with authentification through Kerberos token
# N. B.: you need to install first libffi and MIT kerberos GSSAPI including header files.
# E.g. for Ubuntu 14.04: sudo apt-get install libffi-dev libkrb5-dev
'ssh_kerberos': [
'pyasn1==0.4.2',
'python-gssapi==0.6.4',
],
# Requirements for RESTful API
'rest': [
'Flask==1.0.2',
'Flask-RESTful==0.3.6',
'Flask-Cors==3.0.4',
'pyparsing==2.2.0',
'Flask-SQLAlchemy==2.3.2',
'sqlalchemy-migrate==0.11.0',
'marshmallow-sqlalchemy==0.13.2',
'flask-marshmallow==0.9.0',
'itsdangerous==0.24',
'Flask-HTTPAuth==3.2.3',
'Flask-Cache==0.13.1',
'python-memcached==1.59',
],
# Requirements to building documentation
'docs': [
'Sphinx==1.7.7',
'Pygments==2.2.0',
'docutils==0.14',
'Jinja2==2.10',
'MarkupSafe==1.0',
'sphinx-rtd-theme==0.3.1', # Required by readthedocs
],
# Requirements for non-core functionalities that rely on external atomic manipulation/processing software
'atomic_tools': [
'spglib==1.10.3.65',
'pymatgen==2018.4.20',
'ase==3.12.0', # Updating breaks tests
'PyMySQL==0.8.0', # Required by ICSD tools
'PyCifRW==4.2.1', # Updating breaks tests
'seekpath==1.8.1',
'qe-tools==1.1.0',
],
# Requirements for jupyter notebook
'notebook': [
'jupyter==1.0.0',
],
# Requirements for testing
'testing': [
'pgtest==1.1.0',
'sqlalchemy-diff==0.1.3',
'coverage==4.5.1',
'codecov==2.0.15'
],
'dev_precommit': [
'pre-commit==1.8.2',
'yapf==0.23.0',
'prospector==0.12.11',
'pylint==1.8.4',
'pep8-naming==0.3.3',
'toml==0.9.4'
],
'dev_sphinxext': [
'pytest==3.6.3',
'pytest-cov==2.5.1',
],
'bpython': [
'bpython==0.17.1',
]
}
extras_require['dev_sphinxext'] += extras_require['docs']
extras_require['testing'] += extras_require['rest'] + extras_require['atomic_tools'] + extras_require['dev_sphinxext']
extras_require['all'] = [item for sublist in extras_require.values() for item in sublist if item != 'bpython']
| 33.322835 | 119 | 0.526938 |
3424750086fd5b2fcafc7dfc4457a16ae30cfa8f | 1,032 | py | Python | stocklist/fetch.py | MTI830PyTraders/pytrade | 33ea3e756019c999e9c3d78fca89cd72addf6ab2 | [
"BSD-3-Clause"
] | 3 | 2017-03-08T15:42:26.000Z | 2021-03-10T23:47:15.000Z | stocklist/fetch.py | fraka6/pytrade | 8a94b6e1b3922dcba95067c03abbf45975878b33 | [
"BSD-3-Clause"
] | 15 | 2015-05-20T03:11:58.000Z | 2018-03-30T23:42:18.000Z | stocklist/fetch.py | MTI830PyTraders/pytrade | 33ea3e756019c999e9c3d78fca89cd72addf6ab2 | [
"BSD-3-Clause"
] | 7 | 2016-04-12T09:49:22.000Z | 2021-03-10T23:47:19.000Z | from filters import Filter
from parse import Parse
import logging
class Fetch(object):
def fetch_stocks(self, params):
''' if params==all fetch all stocks get_all_categories'''
filter = Filter()
parser = Parse()
stocklist = []
if params=='all':
cats = filter.get_all_categories()
for cat in cats:
params = [('sc', cat)]
try:
stocklist.extend(self.fetch_stocks(params))
except Exception, e:
print cat
print e
#print stocklist
print 'exited prematurely'
exit()
else:
url = filter.build_query_string(params)
logging.info('url:%s' %url)
print url
stocklist = parser.parse(url, stocklist)
return stocklist
if __name__ == "__main__":
fetch = Fetch()
#params = [('sc', 812)]
params = 'all'
result = fetch.fetch_stocks(params)
| 27.891892 | 63 | 0.514535 |
d8157e51b11081c60c43654522e94afa9c5e7136 | 1,362 | py | Python | dp_utils/dp_pca.py | tkgsn/P3GM | 443550b7781b7ab3c920893923056a8faef44994 | [
"MIT"
] | 4 | 2020-11-25T08:59:12.000Z | 2022-02-03T20:55:12.000Z | dp_utils/dp_pca.py | tkgsn/P3GM | 443550b7781b7ab3c920893923056a8faef44994 | [
"MIT"
] | 1 | 2021-11-30T20:39:16.000Z | 2021-11-30T20:39:16.000Z | dp_utils/dp_pca.py | tkgsn/P3GM | 443550b7781b7ab3c920893923056a8faef44994 | [
"MIT"
] | null | null | null | """ DP Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <denis-alexander.engemann@inria.fr>
# Michael Eickenberg <michael.eickenberg@inria.fr>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
import my_util
from sklearn.decomposition.base import _BasePCA
class DP_GAUSSIAN_PCA(_BasePCA):
def __init__(self, sigma=1, n_components=None, whiten=False, random_state=None):
self.n_components = n_components
self.whiten = whiten
self.sigma = sigma
self.random_state = random_state
def compute_privacy(self):
return None
def fit(self, X):
self.mean_ = np.mean(X, axis=0)
data = X - self.mean_
# First, norm is clipped into 1
data = my_util.norm_clipping(data)
cov = np.dot(data.T, data)
noise = np.random.normal(loc = 0, scale = self.sigma, size = cov.shape)
noise = np.tril(noise) + np.tril(noise).T - np.diag(noise.diagonal())
cov = (cov + noise)/data.shape[0]
ev, evec = np.linalg.eig(cov)
evec = evec.T
self.components_ = evec[:self.n_components] | 28.978723 | 84 | 0.641703 |
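# Usage sketch for the class above (X is any (n_samples, n_features) array;
# names and values are illustrative). With whiten=False, the transform()
# inherited from _BasePCA only needs mean_ and components_, so:
#
#   pca = DP_GAUSSIAN_PCA(sigma=1.0, n_components=10)
#   pca.fit(X)
#   X_reduced = pca.transform(X)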
7c67ce045ac10f7e6bc8d07ec8f030eb0a522af4 | 2,062 | py | Python | nipy/utils/environment.py | yarikoptic/NiPy-OLD | 8759b598ac72d3b9df7414642c7a662ad9c55ece | [
"BSD-3-Clause"
] | 1 | 2015-08-22T16:14:45.000Z | 2015-08-22T16:14:45.000Z | nipy/utils/environment.py | yarikoptic/NiPy-OLD | 8759b598ac72d3b9df7414642c7a662ad9c55ece | [
"BSD-3-Clause"
] | null | null | null | nipy/utils/environment.py | yarikoptic/NiPy-OLD | 8759b598ac72d3b9df7414642c7a662ad9c55ece | [
"BSD-3-Clause"
] | null | null | null | '''
Settings from the system environment relevant to NIPY
'''
import os
from os.path import join as pjoin
def get_home_dir():
"""Return the closest possible equivalent to a 'home' directory.
The path may not exist; code using this routine should not
expect the directory to exist.
Parameters
----------
None
Returns
-------
home_dir : string
best guess at location of home directory
"""
return os.path.expanduser('~')
def get_nipy_user_dir():
"""Get the NIPY user directory
This uses the logic in `get_home_dir` to find the home directory
and the adds either .nipy or _nipy to the end of the path.
We check first in environment variable ``NIPY_USER_DIR``, otherwise
returning the default of ``<homedir>/.nipy`` (Unix) or
``<homedir>/_nipy`` (Windows)
The path may well not exist; code using this routine should not
expect the directory to exist.
Parameters
----------
None
Returns
-------
nipy_dir : string
path to user's NIPY configuration directory
Examples
--------
>>> pth = get_nipy_user_dir()
"""
try:
return os.path.abspath(os.environ['NIPY_USER_DIR'])
except KeyError:
pass
home_dir = get_home_dir()
if os.name == 'posix':
sdir = '.nipy'
else:
sdir = '_nipy'
return pjoin(home_dir, sdir)
def get_nipy_system_dir():
''' Get systemwide NIPY configuration file directory
On posix systems this will be ``/etc/nipy``.
On Windows, the directory is less useful, but by default it will be
``C:\etc\nipy``
The path may well not exist; code using this routine should not
expect the directory to exist.
Parameters
----------
None
Returns
-------
nipy_dir : string
path to systemwide NIPY configuration directory
Examples
--------
>>> pth = get_nipy_system_dir()
'''
if os.name == 'nt':
return r'C:\etc\nipy'
if os.name == 'posix':
return '/etc/nipy'
| 21.705263 | 71 | 0.613967 |
c5d1b03f6c0adfa14beb0d0872197de8417ac336 | 11,432 | py | Python | homeassistant/components/seventeentrack/sensor.py | claret-srl/core | 317068a3784648defadead1c2e6e8de29e20b696 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/seventeentrack/sensor.py | claret-srl/core | 317068a3784648defadead1c2e6e8de29e20b696 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/seventeentrack/sensor.py | claret-srl/core | 317068a3784648defadead1c2e6e8de29e20b696 | [
"Apache-2.0"
] | null | null | null | """Support for package tracking sensors from 17track.net."""
from datetime import timedelta
import logging
from py17track import Client as SeventeenTrackClient
from py17track.errors import SeventeenTrackError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
ATTR_LOCATION,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.event import async_call_later
from homeassistant.util import Throttle, slugify
_LOGGER = logging.getLogger(__name__)
ATTR_DESTINATION_COUNTRY = "destination_country"
ATTR_INFO_TEXT = "info_text"
ATTR_TIMESTAMP = "timestamp"
ATTR_ORIGIN_COUNTRY = "origin_country"
ATTR_PACKAGES = "packages"
ATTR_PACKAGE_TYPE = "package_type"
ATTR_STATUS = "status"
ATTR_TRACKING_INFO_LANGUAGE = "tracking_info_language"
ATTR_TRACKING_NUMBER = "tracking_number"
CONF_SHOW_ARCHIVED = "show_archived"
CONF_SHOW_DELIVERED = "show_delivered"
DATA_PACKAGES = "package_data"
DATA_SUMMARY = "summary_data"
DEFAULT_ATTRIBUTION = "Data provided by 17track.net"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=10)
UNIQUE_ID_TEMPLATE = "package_{0}_{1}"
ENTITY_ID_TEMPLATE = "sensor.seventeentrack_package_{0}"
NOTIFICATION_DELIVERED_ID = "package_delivered_{0}"
NOTIFICATION_DELIVERED_TITLE = "Package {0} delivered"
NOTIFICATION_DELIVERED_MESSAGE = (
"Package Delivered: {0}<br />Visit 17.track for more information: "
"https://t.17track.net/track#nums={1}"
)
VALUE_DELIVERED = "Delivered"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SHOW_ARCHIVED, default=False): cv.boolean,
vol.Optional(CONF_SHOW_DELIVERED, default=False): cv.boolean,
}
)
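# Example configuration.yaml entry accepted by this schema (values are
# illustrative; secrets handling is up to the user):
#
# sensor:
#   - platform: seventeentrack
#     username: you@example.com
#     password: !secret seventeentrack_password
#     show_archived: false
#     show_delivered: true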
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Configure the platform and add the sensors."""
session = aiohttp_client.async_get_clientsession(hass)
client = SeventeenTrackClient(session=session)
try:
login_result = await client.profile.login(
config[CONF_USERNAME], config[CONF_PASSWORD]
)
if not login_result:
_LOGGER.error("Invalid username and password provided")
return
except SeventeenTrackError as err:
_LOGGER.error("There was an error while logging in: %s", err)
return
scan_interval = config.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
data = SeventeenTrackData(
client,
async_add_entities,
scan_interval,
config[CONF_SHOW_ARCHIVED],
config[CONF_SHOW_DELIVERED],
str(hass.config.time_zone),
)
await data.async_update()
class SeventeenTrackSummarySensor(SensorEntity):
"""Define a summary sensor."""
def __init__(self, data, status, initial_state):
"""Initialize."""
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._data = data
self._state = initial_state
self._status = status
@property
def available(self):
"""Return whether the entity is available."""
return self._state is not None
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return "mdi:package"
@property
def name(self):
"""Return the name."""
return f"Seventeentrack Packages {self._status}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Safegate Pro friendly identifier for this entity."""
return f"summary_{self._data.account_id}_{slugify(self._status)}"
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return "packages"
async def async_update(self):
"""Update the sensor."""
await self._data.async_update()
package_data = []
for package in self._data.packages.values():
if package.status != self._status:
continue
package_data.append(
{
ATTR_FRIENDLY_NAME: package.friendly_name,
ATTR_INFO_TEXT: package.info_text,
ATTR_TIMESTAMP: package.timestamp,
ATTR_STATUS: package.status,
ATTR_LOCATION: package.location,
ATTR_TRACKING_NUMBER: package.tracking_number,
}
)
if package_data:
self._attrs[ATTR_PACKAGES] = package_data
self._state = self._data.summary.get(self._status)
class SeventeenTrackPackageSensor(SensorEntity):
"""Define an individual package sensor."""
def __init__(self, data, package):
"""Initialize."""
self._attrs = {
ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION,
ATTR_DESTINATION_COUNTRY: package.destination_country,
ATTR_INFO_TEXT: package.info_text,
ATTR_TIMESTAMP: package.timestamp,
ATTR_LOCATION: package.location,
ATTR_ORIGIN_COUNTRY: package.origin_country,
ATTR_PACKAGE_TYPE: package.package_type,
ATTR_TRACKING_INFO_LANGUAGE: package.tracking_info_language,
ATTR_TRACKING_NUMBER: package.tracking_number,
}
self._data = data
self._friendly_name = package.friendly_name
self._state = package.status
self._tracking_number = package.tracking_number
self.entity_id = ENTITY_ID_TEMPLATE.format(self._tracking_number)
@property
def available(self):
"""Return whether the entity is available."""
return self._data.packages.get(self._tracking_number) is not None
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return "mdi:package"
@property
def name(self):
"""Return the name."""
name = self._friendly_name
if not name:
name = self._tracking_number
return f"Seventeentrack Package: {name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Safegate Pro friendly identifier for this entity."""
return UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number)
async def async_update(self):
"""Update the sensor."""
await self._data.async_update()
if not self.available:
            # Entity cannot be removed while it's being added
async_call_later(self.hass, 1, self._remove)
return
package = self._data.packages.get(self._tracking_number, None)
# If the user has elected to not see delivered packages and one gets
# delivered, post a notification:
if package.status == VALUE_DELIVERED and not self._data.show_delivered:
self._notify_delivered()
            # Entity cannot be removed while it's being added
async_call_later(self.hass, 1, self._remove)
return
self._attrs.update(
{
ATTR_INFO_TEXT: package.info_text,
ATTR_TIMESTAMP: package.timestamp,
ATTR_LOCATION: package.location,
}
)
self._state = package.status
self._friendly_name = package.friendly_name
async def _remove(self, *_):
"""Remove entity itself."""
await self.async_remove(force_remove=True)
reg = await self.hass.helpers.entity_registry.async_get_registry()
entity_id = reg.async_get_entity_id(
"sensor",
"seventeentrack",
UNIQUE_ID_TEMPLATE.format(self._data.account_id, self._tracking_number),
)
if entity_id:
reg.async_remove(entity_id)
def _notify_delivered(self):
"""Notify when package is delivered."""
_LOGGER.info("Package delivered: %s", self._tracking_number)
identification = (
self._friendly_name if self._friendly_name else self._tracking_number
)
message = NOTIFICATION_DELIVERED_MESSAGE.format(
identification, self._tracking_number
)
title = NOTIFICATION_DELIVERED_TITLE.format(identification)
notification_id = NOTIFICATION_DELIVERED_TITLE.format(self._tracking_number)
self.hass.components.persistent_notification.create(
message, title=title, notification_id=notification_id
)
class SeventeenTrackData:
"""Define a data handler for 17track.net."""
def __init__(
self,
client,
async_add_entities,
scan_interval,
show_archived,
show_delivered,
timezone,
):
"""Initialize."""
self._async_add_entities = async_add_entities
self._client = client
self._scan_interval = scan_interval
self._show_archived = show_archived
self.account_id = client.profile.account_id
self.packages = {}
self.show_delivered = show_delivered
self.timezone = timezone
self.summary = {}
self.async_update = Throttle(self._scan_interval)(self._async_update)
self.first_update = True
async def _async_update(self):
"""Get updated data from 17track.net."""
try:
packages = await self._client.profile.packages(
show_archived=self._show_archived, tz=self.timezone
)
_LOGGER.debug("New package data received: %s", packages)
new_packages = {p.tracking_number: p for p in packages}
to_add = set(new_packages) - set(self.packages)
_LOGGER.debug("Will add new tracking numbers: %s", to_add)
if to_add:
self._async_add_entities(
[
SeventeenTrackPackageSensor(self, new_packages[tracking_number])
for tracking_number in to_add
],
True,
)
self.packages = new_packages
except SeventeenTrackError as err:
_LOGGER.error("There was an error retrieving packages: %s", err)
try:
self.summary = await self._client.profile.summary(
show_archived=self._show_archived
)
_LOGGER.debug("New summary data received: %s", self.summary)
# creating summary sensors on first update
if self.first_update:
self.first_update = False
self._async_add_entities(
[
SeventeenTrackSummarySensor(self, status, quantity)
for status, quantity in self.summary.items()
],
True,
)
except SeventeenTrackError as err:
_LOGGER.error("There was an error retrieving the summary: %s", err)
self.summary = {}
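# Note on the update path: async_update is the Throttle-wrapped form of
# _async_update (see __init__ above), so however many package and summary
# sensors share this SeventeenTrackData instance, only the first call inside
# each scan_interval actually queries 17track.net; the others return
# immediately and reuse self.packages / self.summary.
# Illustrative sketch only (argument values below are examples, not taken from
# a real configuration):
#     data = SeventeenTrackData(client, async_add_entities,
#                               timedelta(minutes=10), False, False, "UTC")
#     await data.async_update()  # first call fetches packages and summary
#     await data.async_update()  # inside the interval: throttled, no API call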
| 32.022409 | 88 | 0.64127 |
c2214028c777c7c8ac00cc2de5f084bb9b743bd9 | 4,825 | py | Python | setup.py | hubutui/cyvlfeat | a0cfe17b0cc6fe14a9270b50592b4e0b0ec8ed1c | [
"BSD-2-Clause"
] | null | null | null | setup.py | hubutui/cyvlfeat | a0cfe17b0cc6fe14a9270b50592b4e0b0ec8ed1c | [
"BSD-2-Clause"
] | null | null | null | setup.py | hubutui/cyvlfeat | a0cfe17b0cc6fe14a9270b50592b4e0b0ec8ed1c | [
"BSD-2-Clause"
] | null | null | null | import os
import platform
import site
from setuptools import setup, find_packages, Extension
SYS_PLATFORM = platform.system().lower()
IS_LINUX = 'linux' in SYS_PLATFORM
IS_OSX = 'darwin' == SYS_PLATFORM
IS_WIN = 'windows' == SYS_PLATFORM
# Get Numpy include path without importing it
NUMPY_INC_PATHS = [os.path.join(r, 'numpy', 'core', 'include')
for r in site.getsitepackages() if
os.path.isdir(os.path.join(r, 'numpy', 'core', 'include'))]
if len(NUMPY_INC_PATHS) == 0:
try:
import numpy as np
except ImportError:
raise ValueError("Could not find numpy include dir and numpy not installed before build - "
"cannot proceed with compilation of cython modules.")
else:
        # just ask numpy for its include dir
NUMPY_INC_PATHS = [np.get_include()]
elif len(NUMPY_INC_PATHS) > 1:
print("Found {} numpy include dirs: "
"{}".format(len(NUMPY_INC_PATHS), ', '.join(NUMPY_INC_PATHS)))
print("Taking first (highest precedence on path): {}".format(
NUMPY_INC_PATHS[0]))
NUMPY_INC_PATH = NUMPY_INC_PATHS[0]
# ---- C/C++ EXTENSIONS ---- #
# Stolen (and modified) from the Cython documentation:
# http://cython.readthedocs.io/en/latest/src/reference/compilation.html
def no_cythonize(extensions, **_ignore):
import os.path as op
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
if not op.exists(sfile):
raise ValueError('Cannot find pre-compiled source file '
'({}) - please install Cython'.format(sfile))
sources.append(sfile)
extension.sources[:] = sources
return extensions
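# Usage sketch (the module and file names below are illustrative): with or
# without Cython installed, the caller ends up with Extension objects whose
# sources point at buildable files, so the rest of setup() does not care which
# path was taken.
#     exts = [Extension('cyvlfeat.sift.cysift', sources=['cyvlfeat/sift/cysift.pyx'])]
#     exts = no_cythonize(exts)  # rewrites the sources to the shipped cysift.c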
def build_extension_from_pyx(pyx_path, extra_sources_paths=None):
# If we are building from the conda folder,
# then we know we can manually copy some files around
# because we have control of the setup. If you are
# building this manually or pip installing, you must satisfy
# that the vlfeat vl folder is on the PATH (for the headers)
# and that the vl.dll file is visible to the build system
# as well.
include_dirs = [NUMPY_INC_PATH]
library_dirs = []
if IS_WIN:
include_dirs.append(os.environ['LIBRARY_INC'])
library_dirs.append(os.environ['LIBRARY_BIN'])
if extra_sources_paths is None:
extra_sources_paths = []
extra_sources_paths.insert(0, pyx_path)
ext = Extension(name=pyx_path[:-4].replace('/', '.'),
sources=extra_sources_paths,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=['vl'],
language='c')
if IS_LINUX or IS_OSX:
ext.extra_compile_args.append('-Wno-unused-function')
if IS_OSX:
ext.extra_link_args.append('-headerpad_max_install_names')
return ext
try:
from Cython.Build import cythonize
except ImportError:
import warnings
cythonize = no_cythonize
warnings.warn('Unable to import Cython - attempting to build using the '
'pre-compiled C++ files.')
cython_modules = [
build_extension_from_pyx('cyvlfeat/fisher/cyfisher.pyx'),
build_extension_from_pyx('cyvlfeat/generic/generic.pyx'),
build_extension_from_pyx('cyvlfeat/gmm/cygmm.pyx'),
build_extension_from_pyx('cyvlfeat/hog/cyhog.pyx'),
build_extension_from_pyx('cyvlfeat/kmeans/cykmeans.pyx'),
build_extension_from_pyx('cyvlfeat/quickshift/cyquickshift.pyx'),
build_extension_from_pyx('cyvlfeat/sift/cysift.pyx'),
build_extension_from_pyx('cyvlfeat/vlad/cyvlad.pyx'),
]
cython_exts = cythonize(cython_modules, quiet=True)
def get_version_and_cmdclass(package_name):
import os
from importlib.util import module_from_spec, spec_from_file_location
spec = spec_from_file_location('version',
os.path.join(package_name, '_version.py'))
module = module_from_spec(spec)
spec.loader.exec_module(module)
return module.__version__, module.cmdclass
version, cmdclass = get_version_and_cmdclass('cyvlfeat')
setup(
name='cyvlfeat',
version=version,
cmdclass=cmdclass,
description='Cython wrapper of the VLFeat toolkit',
url='https://github.com/menpo/cyvlfeat',
author='Patrick Snape',
author_email='patricksnape@gmail.com',
ext_modules=cython_exts,
package_data={'cyvlfeat': ['data/*.mat', 'data/ascent.descr', 'data/ascent.frame']},
packages=find_packages()
)
| 36.007463 | 99 | 0.652435 |
8e562f4d3eba41c27d28e287b68808ea546269fe | 7,267 | py | Python | scripts/star.py | jason-neal/Starfish | 4ffa45e0190fb6f3262511d57d1a563e5ee711de | [
"BSD-3-Clause"
] | 1 | 2017-07-10T00:06:36.000Z | 2017-07-10T00:06:36.000Z | scripts/star.py | jason-neal/Starfish | 4ffa45e0190fb6f3262511d57d1a563e5ee711de | [
"BSD-3-Clause"
] | null | null | null | scripts/star.py | jason-neal/Starfish | 4ffa45e0190fb6f3262511d57d1a563e5ee711de | [
"BSD-3-Clause"
] | 5 | 2016-06-11T09:48:16.000Z | 2019-08-07T19:52:41.000Z | #!/usr/bin/env python
# All of the argument parsing is done in the `parallel.py` module.
import numpy as np
import Starfish
from Starfish import parallel
from Starfish.parallel import args
from Starfish.model import ThetaParam, PhiParam
if args.generate:
model = parallel.OptimizeTheta(debug=True)
# Now that the different processes have been forked, initialize them
pconns, cconns, ps = parallel.initialize(model)
pars = ThetaParam.from_dict(Starfish.config["Theta"])
for ((spectrum_id, order_id), pconn) in pconns.items():
#Parse the parameters into what needs to be sent to each Model here.
pconn.send(("LNPROB", pars))
pconn.recv() # Receive and discard the answer so we can send the save
pconn.send(("SAVE", None))
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
if args.optimize == "Theta":
# Check to see if the order JSONs exist, if so, then recreate the noise structure according to these.
# Otherwise assume white noise.
model = parallel.OptimizeTheta(debug=True)
# Now that the different processes have been forked, initialize them
pconns, cconns, ps = parallel.initialize(model)
def fprob(p):
# Assume p is [temp, logg, Z, vz, vsini, logOmega]
pars = ThetaParam(grid=p[0:3], vz=p[3], vsini=p[4], logOmega=p[5])
#Distribute the calculation to each process
for ((spectrum_id, order_id), pconn) in pconns.items():
#Parse the parameters into what needs to be sent to each Model here.
pconn.send(("LNPROB", pars))
#Collect the answer from each process
lnps = np.empty((len(Starfish.data["orders"]),))
for i, pconn in enumerate(pconns.values()):
lnps[i] = pconn.recv()
s = np.sum(lnps)
print(pars, "lnp:", s)
if s == -np.inf:
return 1e99
else:
return -s
start = Starfish.config["Theta"]
p0 = np.array(start["grid"] + [start["vz"], start["vsini"], start["logOmega"]])
from scipy.optimize import fmin
p = fmin(fprob, p0, maxiter=10000, maxfun=10000)
print(p)
pars = ThetaParam(grid=p[0:3], vz=p[3], vsini=p[4], logOmega=p[5])
pars.save()
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
if args.initPhi:
# Figure out how many models and orders we have
i_last = len(Starfish.data["orders"]) - 1
for spec_id in range(len(Starfish.data["files"])):
for i, order in enumerate(Starfish.data["orders"]):
fix_c0 = True if i==i_last else False
if fix_c0:
cheb = np.zeros((Starfish.config["cheb_degree"] - 1,))
else:
cheb = np.zeros((Starfish.config["cheb_degree"],))
# For each order, create a Phi with these values
# Automatically reads all of the Phi parameters from config.yaml
phi = PhiParam(spectrum_id=spec_id, order=int(order), fix_c0=fix_c0, cheb=cheb)
# Write to CWD using predetermined format string
phi.save()
if args.optimize == "Cheb":
model = parallel.OptimizeCheb(debug=True)
# Now that the different processes have been forked, initialize them
pconns, cconns, ps = parallel.initialize(model)
# Initialize to the basics
pars = ThetaParam.from_dict(Starfish.config["Theta"])
#Distribute the calculation to each process
for ((spectrum_id, order_id), pconn) in pconns.items():
#Parse the parameters into what needs to be sent to each Model here.
pconn.send(("LNPROB", pars))
pconn.recv() # Receive and discard the answer so we can send the optimize
pconn.send(("OPTIMIZE_CHEB", None))
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
if args.sample == "ThetaCheb" or args.sample == "ThetaPhi" or args.sample == "ThetaPhiLines":
if args.sample == "ThetaCheb":
model = parallel.SampleThetaCheb(debug=True)
if args.sample == "ThetaPhi":
model = parallel.SampleThetaPhi(debug=True)
if args.sample == "ThetaPhiLines":
model = parallel.SampleThetaPhiLines(debug=True)
pconns, cconns, ps = parallel.initialize(model)
# These functions store the variables pconns, cconns, ps.
def lnprob(p):
pars = ThetaParam(grid=p[0:3], vz=p[3], vsini=p[4], logOmega=p[5])
#Distribute the calculation to each process
for ((spectrum_id, order_id), pconn) in pconns.items():
pconn.send(("LNPROB", pars))
#Collect the answer from each process
lnps = np.empty((len(Starfish.data["orders"]),))
for i, pconn in enumerate(pconns.values()):
lnps[i] = pconn.recv()
result = np.sum(lnps) # + lnprior
print("proposed:", p, result)
return result
def query_lnprob():
for ((spectrum_id, order_id), pconn) in pconns.items():
pconn.send(("GET_LNPROB", None))
#Collect the answer from each process
lnps = np.empty((len(Starfish.data["orders"]),))
for i, pconn in enumerate(pconns.values()):
lnps[i] = pconn.recv()
result = np.sum(lnps) # + lnprior
print("queried:", result)
return result
def acceptfn():
print("Calling acceptfn")
for ((spectrum_id, order_id), pconn) in pconns.items():
pconn.send(("DECIDE", True))
def rejectfn():
print("Calling rejectfn")
for ((spectrum_id, order_id), pconn) in pconns.items():
pconn.send(("DECIDE", False))
from Starfish.samplers import StateSampler
start = Starfish.config["Theta"]
p0 = np.array(start["grid"] + [start["vz"], start["vsini"], start["logOmega"]])
jump = Starfish.config["Theta_jump"]
cov = np.diag(np.array(jump["grid"] + [jump["vz"], jump["vsini"], jump["logOmega"]])**2)
if args.use_cov:
try:
cov = np.load('opt_jump.npy')
print("Found a local optimal jump matrix.")
except FileNotFoundError:
print("No optimal jump matrix found, using diagonal jump matrix.")
sampler = StateSampler(lnprob, p0, cov, query_lnprob=query_lnprob, acceptfn=acceptfn, rejectfn=rejectfn, debug=True, outdir=Starfish.routdir)
p, lnprob, state = sampler.run_mcmc(p0, N=args.samples, incremental_save=args.incremental_save)
print("Final", p)
sampler.write()
# Kill all of the orders
for pconn in pconns.values():
pconn.send(("FINISH", None))
pconn.send(("DIE", None))
# Join on everything and terminate
for p in ps.values():
p.join()
p.terminate()
import sys;sys.exit()
| 32.297778 | 145 | 0.619375 |
86fab12dd2d895fcd2409d3245c0f2c84d448542 | 349 | py | Python | Universal_Asynchronous_Receiver_and_Tansmitter_Examples/clientB.py | yingshaoxo/Suicide-Squad | cadbd0d48e860a8747b59190fc67a5a114c3462b | [
"MIT"
] | 4 | 2021-02-05T15:19:41.000Z | 2021-04-29T23:13:54.000Z | Universal_Asynchronous_Receiver_and_Tansmitter_Examples/clientB.py | yingshaoxo/Suicide-Squad | cadbd0d48e860a8747b59190fc67a5a114c3462b | [
"MIT"
] | null | null | null | Universal_Asynchronous_Receiver_and_Tansmitter_Examples/clientB.py | yingshaoxo/Suicide-Squad | cadbd0d48e860a8747b59190fc67a5a114c3462b | [
"MIT"
] | null | null | null | from UART_Python_Lib import *
# Open the transmission protocol on the serial port /dev/ttyUSB1.
my_transmission = MyTransmissionProtocol("/dev/ttyUSB1")
# Send a small JSON payload to the peer on the other end of the link.
my_transmission.write_json({
    "me": 21,
    "you": 21,
    "someone": 100
})
# Read one incoming message from the peer and print it.
print(my_transmission.read_safely())
# Continuously stream a countdown value (255..1) tagged with a text suffix,
# wrapping back to 255 so the stream never stops.
i = 255
while 1:
    my_transmission.write_safely(hex_to_bytes(text_to_hex(str(i) + "yingshaoxo")))
    i -= 1
    if i == 0:
        i = 255
| 17.45 | 80 | 0.661891 |
25d27a0474ecc75e25717b71bfe85249479a78e8 | 171 | py | Python | src/drctest/models.py | appstore-zencore/django-redis-counter | 9f9d35293e7fde8429ea6f431b76976be78be6e3 | [
"MIT"
] | 4 | 2018-03-15T12:47:15.000Z | 2019-09-04T08:36:40.000Z | src/drctest/models.py | appstore-zencore/django-redis-counter | 9f9d35293e7fde8429ea6f431b76976be78be6e3 | [
"MIT"
] | 1 | 2018-03-21T01:12:39.000Z | 2018-03-21T01:12:39.000Z | src/drctest/models.py | appstore-zencore/django-redis-counter | 9f9d35293e7fde8429ea6f431b76976be78be6e3 | [
"MIT"
] | null | null | null | from django.db import models
from drc.models import Counter
class Page(models.Model):
title = models.CharField(max_length=32)
class PageCounter(Counter):
pass
| 15.545455 | 43 | 0.754386 |
19708d5183994630b437024c7c0bcebffb70fd11 | 4,793 | py | Python | warps/resources/filter_bank.py | oscillating-gate/eurorack | 35bf03aa35b01a7a4a9b0a0ca2898677cd3a9f6a | [
"MIT"
] | 233 | 2018-07-02T16:49:36.000Z | 2022-02-27T21:45:39.000Z | warps/resources/filter_bank.py | oscillating-gate/eurorack | 35bf03aa35b01a7a4a9b0a0ca2898677cd3a9f6a | [
"MIT"
] | 24 | 2018-07-09T11:32:15.000Z | 2022-01-07T01:45:43.000Z | warps/resources/filter_bank.py | oscillating-gate/eurorack | 35bf03aa35b01a7a4a9b0a0ca2898677cd3a9f6a | [
"MIT"
] | 24 | 2018-07-14T21:55:30.000Z | 2021-05-04T04:20:34.000Z | #!/usr/bin/python2.5
#
# Copyright 2014 Olivier Gillet.
#
# Author: Olivier Gillet (ol.gillet@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Lookup table definitions.
import numpy
import pylab
import scipy.signal
def pole_pair_to_f_fq(pole_pair):
fq = 1 - pole_pair.prod()
f = -(2 - fq - (pole_pair.sum())) ** 0.5
return f.real, fq.real
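# Sketch of where the two lines above come from: one pass of the modified
# Chamberlin update below has state matrix [[1, f], [-f, 1 - fq - f**2]], i.e.
# characteristic polynomial
#     z**2 - (2 - fq - f**2) * z + (1 - fq).
# Matching it to the pole pair's z**2 - (p + conj(p)) * z + p * conj(p) gives
#     fq = 1 - p * conj(p)              (one minus the pair's product)
#     f**2 = 2 - fq - (p + conj(p))     (two minus fq minus the pair's sum)
# Illustrative check (filter parameters are example values only):
#     >>> _, p, _ = scipy.signal.butter(2, 0.1, 'lowpass', output='zpk')
#     >>> f, fq = pole_pair_to_f_fq(p[0:2])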
def modified_chamberlin(f, fq, x, mode='bp'):
lp = 0.0
bp = 0.0
y = numpy.zeros(x.shape)
x_ = 0.0
coefficient = 1.0 if mode == 'bp' else 0.0
for i in xrange(len(y)):
lp += f * bp
bp += -fq * bp -f * lp + (x[i] + x_ * coefficient)
x_ = x[i]
if mode =='bp':
y[i] = fq * bp
elif mode == 'lp':
y[i] = f * lp
elif mode == 'hp':
y[i] = x_ - lp * f - bp * fq
return y
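# Minimal usage sketch (coefficient values here are illustrative only); the
# design loop below cascades two passes of this filter per pole pair over each
# band's impulse in the same way.
#     >>> impulse = numpy.eye(256, 1).ravel()
#     >>> out = modified_chamberlin(0.2, 0.3, impulse, mode='bp')
#     >>> out = modified_chamberlin(0.2, 0.3, out, mode='bp')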
SAMPLE_RATE = 96000
IR_SIZE = 2048
sample_rates = [SAMPLE_RATE / 12] * 13
sample_rates += [SAMPLE_RATE / 3] * 6
sample_rates += [SAMPLE_RATE] * 1
num_bands = len(sample_rates)
interval = 2 ** (1 / 3.0)
first_frequency = 110 / interval
frequencies = first_frequency * (interval ** numpy.arange(0, num_bands))
filters = []
responses = {}
reconstruction = {}
generate_figures = __name__ == '__main__'
for index, (frequency, sr) in enumerate(zip(frequencies, sample_rates)):
if not sr in reconstruction:
reconstruction[sr] = [0.0, 0.0]
responses[sr] = []
frequency = frequency / (sr * 0.5)
if index == 0:
w = frequency
z, p, k = scipy.signal.cheby1(4, 0.5, w, 'lowpass', output='zpk')
svf_mode = 'lp'
gain = 1.0
elif index == num_bands - 1:
w = frequency
z, p, k = scipy.signal.cheby1(4, 0.25, w, 'highpass', output='zpk')
svf_mode = 'hp'
gain = 21 * frequency
else:
w = [frequency / (interval ** 0.5), frequency * (interval ** 0.5)]
z, p, k = scipy.signal.butter(2, w, 'bandpass', output='zpk')
svf_mode = 'bp'
gain = 0.25
# Filter using direct form
out = numpy.eye(IR_SIZE, 1).ravel()
b, a = scipy.signal.zpk2tf(z, p, k)
out = scipy.signal.lfilter(b, a, out)
out = scipy.signal.lfilter(b, a, out)
reconstruction[sr][0] += out
responses[sr] += [out]
# Filter using modified Chamberlin filter
out = numpy.eye(IR_SIZE, 1).ravel() * gain
coefficients = [0, 0, 0]
for i in xrange(2):
f, fq = pole_pair_to_f_fq(p[i*2:i*2 + 2])
out = modified_chamberlin(f, fq, out, svf_mode)
out = modified_chamberlin(f, fq, out, svf_mode)
coefficients += [f, fq]
delay = (numpy.arange(len(out)) * out * out).sum() / (out * out).sum()
# Completely empirical fixes to the delay to maximize the flatness of the
# total impulse response.
if index == num_bands - 1:
delay += 4
coefficients[0] = SAMPLE_RATE / sr
coefficients[1] = numpy.floor(delay)
coefficients[2] = gain
filters += [('%3.0f_%d' % (frequency * 0.5 * sr, sr), coefficients)]
reconstruction[sr][1] += out
if generate_figures:
pylab.figure(figsize=(20,8))
n = len(responses.keys())
for row, sr in enumerate(sorted(responses.keys())):
f = numpy.arange(IR_SIZE / 2 + 1) / float(IR_SIZE) * sr
for column, plots in enumerate([reconstruction[sr], responses[sr]]):
pylab.subplot(2, n, column * n + row + 1)
for r in plots:
sy = numpy.log10(numpy.abs(numpy.fft.rfft(r)) + 1e-20) * 20.0
pylab.semilogx(f, sy)
pylab.xlim(80, sr / 2)
pylab.ylim(-36, 12)
pylab.xlabel('Frequency (Hz)')
pylab.ylabel('Gain (dB)')
if len(plots) == 2:
pylab.ylim(-4, 3)
#pylab.legend(['Direct form', 'Chamberlin'])
pylab.savefig('filter_bank.pdf')
# pylab.show()
pylab.close()
| 29.770186 | 79 | 0.634258 |
b9f488f8d1485f277963c19eb703f013f507f2c9 | 1,200 | py | Python | tests/compiler/test_library_parsing.py | pretl/ALIGN-public | 4b03042d9e96fa669740427842b0bf268b0c9a86 | [
"BSD-3-Clause"
] | null | null | null | tests/compiler/test_library_parsing.py | pretl/ALIGN-public | 4b03042d9e96fa669740427842b0bf268b0c9a86 | [
"BSD-3-Clause"
] | null | null | null | tests/compiler/test_library_parsing.py | pretl/ALIGN-public | 4b03042d9e96fa669740427842b0bf268b0c9a86 | [
"BSD-3-Clause"
] | null | null | null | from align.schema.types import set_context
import pathlib
import pytest
from align.schema.parser import SpiceParser
from align.schema import constraint
@pytest.fixture
def library():
parser = SpiceParser()
align_home = pathlib.Path(__file__).resolve().parent.parent / "files"
basic_lib_path = align_home / "basic_template.sp"
with open(basic_lib_path) as f:
lines = f.read()
parser.parse(lines)
user_lib_path = align_home / "user_template.sp"
with open(user_lib_path) as f:
lines = f.read()
parser.parse(lines)
return parser.library
def test_basic_lib(library):
assert len(library.find("DP_PMOS_B").elements) == 2
assert len(library.find("CASCODED_CMC_PMOS").elements) == 4
assert len(library.find("INV_B").elements) == 2
assert len(library) == 54
def test_constraint(library):
assert len(library.find("DP_PMOS_B").constraints) == 3
dp_const = library.find("DP_PMOS_B").constraints
with set_context(dp_const):
x = constraint.SymmetricBlocks(direction="V", pairs=[["M0", "M1"]])
assert x in dp_const
assert dp_const[0].constraint == "symmetric_blocks"
assert dp_const[0].pairs == [["M0", "M1"]]
| 31.578947 | 75 | 0.6975 |
fb914d39703bb87cf046e118d5f4e0298d33b622 | 8,536 | py | Python | docusign_esign/models/bulk_envelopes_response.py | hunk/docusign-python-client | a643c42c1236715e74eef6fc279a1b29da1b5455 | [
"MIT"
] | null | null | null | docusign_esign/models/bulk_envelopes_response.py | hunk/docusign-python-client | a643c42c1236715e74eef6fc279a1b29da1b5455 | [
"MIT"
] | null | null | null | docusign_esign/models/bulk_envelopes_response.py | hunk/docusign-python-client | a643c42c1236715e74eef6fc279a1b29da1b5455 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign.
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class BulkEnvelopesResponse(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, bulk_envelope_statuses=None, end_position=None, next_uri=None, previous_uri=None, result_set_size=None, start_position=None, total_set_size=None):
"""
BulkEnvelopesResponse - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'bulk_envelope_statuses': 'list[BulkEnvelopeStatus]',
'end_position': 'str',
'next_uri': 'str',
'previous_uri': 'str',
'result_set_size': 'str',
'start_position': 'str',
'total_set_size': 'str'
}
self.attribute_map = {
'bulk_envelope_statuses': 'bulkEnvelopeStatuses',
'end_position': 'endPosition',
'next_uri': 'nextUri',
'previous_uri': 'previousUri',
'result_set_size': 'resultSetSize',
'start_position': 'startPosition',
'total_set_size': 'totalSetSize'
}
self._bulk_envelope_statuses = bulk_envelope_statuses
self._end_position = end_position
self._next_uri = next_uri
self._previous_uri = previous_uri
self._result_set_size = result_set_size
self._start_position = start_position
self._total_set_size = total_set_size
@property
def bulk_envelope_statuses(self):
"""
Gets the bulk_envelope_statuses of this BulkEnvelopesResponse.
Reserved: TBD
:return: The bulk_envelope_statuses of this BulkEnvelopesResponse.
:rtype: list[BulkEnvelopeStatus]
"""
return self._bulk_envelope_statuses
@bulk_envelope_statuses.setter
def bulk_envelope_statuses(self, bulk_envelope_statuses):
"""
Sets the bulk_envelope_statuses of this BulkEnvelopesResponse.
Reserved: TBD
:param bulk_envelope_statuses: The bulk_envelope_statuses of this BulkEnvelopesResponse.
:type: list[BulkEnvelopeStatus]
"""
self._bulk_envelope_statuses = bulk_envelope_statuses
@property
def end_position(self):
"""
Gets the end_position of this BulkEnvelopesResponse.
The last position in the result set.
:return: The end_position of this BulkEnvelopesResponse.
:rtype: str
"""
return self._end_position
@end_position.setter
def end_position(self, end_position):
"""
Sets the end_position of this BulkEnvelopesResponse.
The last position in the result set.
:param end_position: The end_position of this BulkEnvelopesResponse.
:type: str
"""
self._end_position = end_position
@property
def next_uri(self):
"""
Gets the next_uri of this BulkEnvelopesResponse.
The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null.
:return: The next_uri of this BulkEnvelopesResponse.
:rtype: str
"""
return self._next_uri
@next_uri.setter
def next_uri(self, next_uri):
"""
Sets the next_uri of this BulkEnvelopesResponse.
The URI to the next chunk of records based on the search request. If the endPosition is the entire results of the search, this is null.
:param next_uri: The next_uri of this BulkEnvelopesResponse.
:type: str
"""
self._next_uri = next_uri
@property
def previous_uri(self):
"""
Gets the previous_uri of this BulkEnvelopesResponse.
        The URI to the previous chunk of records based on the search request.
:return: The previous_uri of this BulkEnvelopesResponse.
:rtype: str
"""
return self._previous_uri
@previous_uri.setter
def previous_uri(self, previous_uri):
"""
Sets the previous_uri of this BulkEnvelopesResponse.
        The URI to the previous chunk of records based on the search request.
:param previous_uri: The previous_uri of this BulkEnvelopesResponse.
:type: str
"""
self._previous_uri = previous_uri
@property
def result_set_size(self):
"""
Gets the result_set_size of this BulkEnvelopesResponse.
The number of results returned in this response.
:return: The result_set_size of this BulkEnvelopesResponse.
:rtype: str
"""
return self._result_set_size
@result_set_size.setter
def result_set_size(self, result_set_size):
"""
Sets the result_set_size of this BulkEnvelopesResponse.
The number of results returned in this response.
:param result_set_size: The result_set_size of this BulkEnvelopesResponse.
:type: str
"""
self._result_set_size = result_set_size
@property
def start_position(self):
"""
Gets the start_position of this BulkEnvelopesResponse.
Starting position of the current result set.
:return: The start_position of this BulkEnvelopesResponse.
:rtype: str
"""
return self._start_position
@start_position.setter
def start_position(self, start_position):
"""
Sets the start_position of this BulkEnvelopesResponse.
Starting position of the current result set.
:param start_position: The start_position of this BulkEnvelopesResponse.
:type: str
"""
self._start_position = start_position
@property
def total_set_size(self):
"""
Gets the total_set_size of this BulkEnvelopesResponse.
        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the response.
:return: The total_set_size of this BulkEnvelopesResponse.
:rtype: str
"""
return self._total_set_size
@total_set_size.setter
def total_set_size(self, total_set_size):
"""
Sets the total_set_size of this BulkEnvelopesResponse.
        The total number of items available in the result set. This will always be greater than or equal to the value of the property returning the results in the response.
:param total_set_size: The total_set_size of this BulkEnvelopesResponse.
:type: str
"""
self._total_set_size = total_set_size
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
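# Illustrative usage (field values are examples only): the generated model is a
# plain value object, so it can be constructed directly and serialized with
# to_dict() / to_str().
#     page = BulkEnvelopesResponse(result_set_size="2", start_position="0",
#                                  total_set_size="10")
#     page.to_dict()  # -> {'bulk_envelope_statuses': None, ..., 'total_set_size': '10'}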
| 31.498155 | 179 | 0.624414 |
148ae943001cfe7c151b34e00ec788a9998d5ff8 | 1,710 | py | Python | es_example/es_notification_listener/tests/test_es_notification_listener.py | force-h2020/force-bdss-plugin-enginsoft-toy-model | f22c0ad3cc45c3b5a7f9c4fd0b20549d7dfc9aeb | [
"MIT"
] | null | null | null | es_example/es_notification_listener/tests/test_es_notification_listener.py | force-h2020/force-bdss-plugin-enginsoft-toy-model | f22c0ad3cc45c3b5a7f9c4fd0b20549d7dfc9aeb | [
"MIT"
] | null | null | null | es_example/es_notification_listener/tests/test_es_notification_listener.py | force-h2020/force-bdss-plugin-enginsoft-toy-model | f22c0ad3cc45c3b5a7f9c4fd0b20549d7dfc9aeb | [
"MIT"
] | null | null | null | import unittest
from es_example.tests.utils import captured_output
from unittest import mock
from force_bdss.api import (
MCOStartEvent, MCOProgressEvent, MCOFinishEvent, DataValue)
from es_example.es_notification_listener\
.es_notification_listener_model import (
ESNotificationListenerModel)
from es_example.es_notification_listener\
.es_notification_listener_factory import (
ESNotificationListenerFactory)
from es_example.es_notification_listener\
.es_notification_listener import (
ESNotificationListener)
class TestESNotificationListener(unittest.TestCase):
def test_initialization(self):
print("PippoTESTES")
listener = ESNotificationListener(
mock.Mock(spec=ESNotificationListenerFactory))
model = mock.Mock(spec=ESNotificationListenerModel)
with captured_output() as (out, err):
listener.initialize(model)
listener.deliver(MCOStartEvent(
parameter_names=["foo", "bar"],
kpi_names=["baz", "quux"]))
listener.deliver(MCOProgressEvent(
optimal_point=[DataValue(value=1.0), DataValue(value=2.0)],
optimal_kpis=[DataValue(value=3.0), DataValue(value=4.0)],
weights=[0.5, 0.5]
))
listener.deliver(MCOFinishEvent())
listener.finalize()
self.assertEqual(
out.getvalue(),
"Initializing\n"
"MCOStartEvent ['foo', 'bar'] ['baz', 'quux']\n"
"MCOProgressEvent [1.0, 2.0] [3.0, 4.0] [0.5, 0.5]\n"
"MCOFinishEvent\n"
"Finalizing\n"
)
| 35.625 | 76 | 0.624561 |
f66ac6bc02abd0a186006657bf70af3b0cf8631b | 6,504 | py | Python | ceed/tests/test_app/examples/stages.py | cplab/ceed | 7bae74361a67a6ff3c447b63adb4c7c57839b339 | [
"MIT"
] | 1 | 2020-08-05T15:31:06.000Z | 2020-08-05T15:31:06.000Z | ceed/tests/test_app/examples/stages.py | cplab/ceed | 7bae74361a67a6ff3c447b63adb4c7c57839b339 | [
"MIT"
] | null | null | null | ceed/tests/test_app/examples/stages.py | cplab/ceed | 7bae74361a67a6ff3c447b63adb4c7c57839b339 | [
"MIT"
] | null | null | null | from typing import List, Tuple, Union
from ceed.tests.ceed_app import CeedTestApp
from ceed.stage import CeedStage, StageFactoryBase
from ceed.shape import CeedShape, CeedShapeGroup
from .shapes import Shape
from ceed.function import FuncBase, FuncGroup, FunctionFactoryBase
from .funcs import ConstFunctionF1, LinearFunctionF1, ExponentialFunctionF1, \
CosFunctionF1, GroupFunctionF1, Function, create_funcs
from .shapes import assert_add_three_groups
def make_stage(stage_factory: StageFactoryBase, **kwargs):
return CeedStage(
stage_factory=stage_factory,
function_factory=stage_factory.function_factory,
shape_factory=stage_factory.shape_factory, **kwargs)
def create_stage_funcs(func_app, function_factory):
funcs = create_funcs(
func_app=func_app, function_factory=function_factory,
show_in_gui=False)
for func in funcs:
func.create_func()
return funcs
def create_test_stages(
stage_app: CeedTestApp = None, stage_factory: StageFactoryBase = None,
show_in_gui=True, add_func=True, add_shapes=True) -> \
Tuple[Tuple['StageWrapper'], List[Union[Shape, CeedShapeGroup]]]:
if stage_app is None:
function_factory = stage_factory.function_factory
shape_factory = stage_factory.shape_factory
else:
function_factory = stage_app.function_factory
shape_factory = stage_app.shape_factory
stage_factory = stage_app.stage_factory
# create shapes
if add_shapes:
(group, group2, group3), (shape, shape2, shape3) = \
assert_add_three_groups(
shape_factory=shape_factory, app=stage_app,
show_in_gui=show_in_gui)
shapes = [group3, shape, shape2, shape3]
else:
shapes = []
if add_func:
funcs = lambda: create_stage_funcs(
func_app=stage_app, function_factory=function_factory)
else:
funcs = lambda: []
s1 = ParaAnyStage(
stage_factory=stage_factory, show_in_gui=show_in_gui, shapes=shapes,
functions=funcs(), app=stage_app)
s2 = ParaAllStage(
stage_factory=stage_factory, show_in_gui=show_in_gui, shapes=shapes,
functions=funcs(), app=stage_app)
s3 = SerialAnyStage(
stage_factory=stage_factory, show_in_gui=show_in_gui, shapes=shapes,
functions=funcs(), app=stage_app, parent_wrapper_stage=s2)
return (s1, s2, s3), shapes
def assert_stages_same(
stage1: CeedStage, stage2: CeedStage, compare_name=False):
assert isinstance(stage1, stage2.__class__)
keys = set(stage1.get_state().keys()) | set(stage2.get_state().keys())
assert 'name' in keys
if not compare_name:
keys.remove('name')
for key in keys:
if key in ('stages', 'functions', 'shapes', 'cls'):
continue
assert getattr(stage1, key) == getattr(stage2, key)
class StageWrapper(object):
stage: CeedStage = None
app: CeedTestApp = None
stage_factory: StageFactoryBase = None
stages_container = None
name = ''
order = ''
complete_on = ''
stages: 'List[StageWrapper]' = []
parent_wrapper_stage: 'StageWrapper' = None
functions: List[Function] = []
shapes: List[Shape] = []
color_r = False
color_g = False
color_b = False
color_a = None
def __init__(
self, app: CeedTestApp = None,
stage_factory: StageFactoryBase = None,
parent_wrapper_stage: 'StageWrapper' = None,
show_in_gui=True, create_add_to_parent=False, shapes=[],
functions=[]):
self.stages = []
super().__init__()
self.app = app
if app is None:
self.stage_factory = stage_factory
else:
self.stage_factory = app.stage_factory
self.stages_container = app.stages_container
self.parent_wrapper_stage = parent_wrapper_stage
self.shapes = shapes
self.functions = functions
if show_in_gui:
self.show_in_gui()
elif create_add_to_parent:
self.create_add_to_parent()
def create_stage(self):
stage = self.stage = CeedStage(
stage_factory=self.stage_factory,
function_factory=self.stage_factory.function_factory,
shape_factory=self.stage_factory.shape_factory,
name=self.name, order=self.order, complete_on=self.complete_on,
color_r=self.color_r, color_g=self.color_g, color_b=self.color_b,
color_a=self.color_a
)
for shape in self.shapes:
if isinstance(shape, Shape):
stage.add_shape(shape.shape)
else:
stage.add_shape(shape)
for func in self.functions:
stage.add_func(func.func)
def create_add_to_parent(self):
self.create_stage()
if self.parent_wrapper_stage is None:
self.stage_factory.add_stage(
self.stage, allow_last_experiment=False)
else:
self.parent_wrapper_stage.stages.append(self)
self.parent_wrapper_stage.stage.add_stage(self.stage)
def show_in_gui(self):
self.create_add_to_parent()
if self.parent_wrapper_stage is None:
self.stages_container.show_stage(self.stage)
else:
self.stages_container.show_sub_stage(
self.stage, self.parent_wrapper_stage.stage)
def assert_init(self):
assert self.name == self.stage.name
assert self.order == self.stage.order
assert self.complete_on == self.stage.complete_on
assert self.color_r == self.stage.color_r
assert self.color_g == self.stage.color_g
assert self.color_b == self.stage.color_b
assert self.color_a == self.stage.color_a
class ParaAllStage(StageWrapper):
name = 'a parallel stage'
order = 'parallel'
complete_on = 'all'
color_r = True
color_g = True
class ParaAnyStage(StageWrapper):
name = 'a parallel page'
order = 'parallel'
complete_on = 'any'
color_g = True
class SerialAllStage(StageWrapper):
name = 'a serial stage'
order = 'serial'
complete_on = 'all'
color_r = True
color_g = True
class SerialAnyStage(StageWrapper):
name = 'a serial page'
order = 'serial'
complete_on = 'any'
color_g = True
stage_classes = (ParaAllStage, ParaAnyStage, SerialAnyStage, SerialAllStage)
| 27.443038 | 78 | 0.65698 |
412fe60a4a3a4f6db47e6674ca51e79381eac31e | 17,887 | py | Python | cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/volume/drivers/dell_emc/vnx/test_client.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from cinder import exception
from cinder import test
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_exception \
as storops_ex
from cinder.tests.unit.volume.drivers.dell_emc.vnx import fake_storops \
as storops
from cinder.tests.unit.volume.drivers.dell_emc.vnx import res_mock
from cinder.tests.unit.volume.drivers.dell_emc.vnx import utils
from cinder.volume.drivers.dell_emc.vnx import client as vnx_client
from cinder.volume.drivers.dell_emc.vnx import common as vnx_common
class TestCondition(test.TestCase):
@res_mock.patch_client
def test_is_lun_io_ready_false(self, client, mocked):
r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
self.assertFalse(r)
@res_mock.patch_client
def test_is_lun_io_ready_true(self, client, mocked):
r = vnx_client.Condition.is_lun_io_ready(mocked['lun'])
self.assertTrue(r)
@res_mock.patch_client
def test_is_lun_io_ready_exception(self, client, mocked):
self.assertRaises(exception.VolumeBackendAPIException,
vnx_client.Condition.is_lun_io_ready,
mocked['lun'])
class TestClient(test.TestCase):
def setUp(self):
super(TestClient, self).setUp()
self.origin_timeout = vnx_common.DEFAULT_TIMEOUT
vnx_common.DEFAULT_TIMEOUT = 0
def tearDown(self):
super(TestClient, self).tearDown()
vnx_common.DEFAULT_TIMEOUT = self.origin_timeout
@res_mock.patch_client
def test_create_lun(self, client, mocked):
client.create_lun(pool='pool1', name='test', size=1, provision=None,
tier=None, cg_id=None, ignore_thresholds=False)
client.vnx.get_pool.assert_called_once_with(name='pool1')
pool = client.vnx.get_pool(name='pool1')
pool.create_lun.assert_called_with(lun_name='test',
size_gb=1,
provision=None,
tier=None,
ignore_thresholds=False)
@res_mock.patch_client
def test_create_lun_error(self, client, mocked):
self.assertRaises(storops_ex.VNXCreateLunError,
client.create_lun,
pool='pool1',
name='test',
size=1,
provision=None,
tier=None,
cg_id=None,
ignore_thresholds=False)
client.vnx.get_pool.assert_called_once_with(name='pool1')
@res_mock.patch_client
def test_create_lun_already_existed(self, client, mocked):
client.create_lun(pool='pool1', name='lun3', size=1, provision=None,
tier=None, cg_id=None, ignore_thresholds=False)
client.vnx.get_lun.assert_called_once_with(name='lun3')
@res_mock.patch_client
def test_create_lun_in_cg(self, client, mocked):
client.create_lun(
pool='pool1', name='test', size=1, provision=None,
tier=None, cg_id='cg1', ignore_thresholds=False)
@res_mock.patch_client
def test_create_lun_compression(self, client, mocked):
client.create_lun(pool='pool1', name='lun2', size=1,
provision=storops.VNXProvisionEnum.COMPRESSED,
tier=None, cg_id=None,
ignore_thresholds=False)
@res_mock.patch_client
def test_migrate_lun(self, client, mocked):
client.migrate_lun(src_id=1,
dst_id=2)
lun = client.vnx.get_lun()
lun.migrate.assert_called_with(2, storops.VNXMigrationRate.HIGH)
@unittest.skip("Skip until bug #1578986 is fixed")
@utils.patch_sleep
@res_mock.patch_client
def test_migrate_lun_with_retry(self, client, mocked, mock_sleep):
lun = client.vnx.get_lun()
self.assertRaises(storops_ex.VNXTargetNotReadyError,
client.migrate_lun,
src_id=4,
dst_id=5)
lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
mock_sleep.assert_called_with(15)
@res_mock.patch_client
def test_session_finished_faulted(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertTrue(r)
@res_mock.patch_client
def test_session_finished_migrating(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertFalse(r)
@res_mock.patch_client
def test_session_finished_not_existed(self, client, mocked):
lun = client.vnx.get_lun()
r = client.session_finished(lun)
self.assertTrue(r)
@res_mock.patch_client
def test_migrate_lun_error(self, client, mocked):
lun = client.vnx.get_lun()
self.assertRaises(storops_ex.VNXMigrationError,
client.migrate_lun,
src_id=4,
dst_id=5)
lun.migrate.assert_called_with(5, storops.VNXMigrationRate.HIGH)
@res_mock.patch_client
def test_verify_migration(self, client, mocked):
r = client.verify_migration(1, 2, 'test_wwn')
self.assertTrue(r)
@res_mock.patch_client
def test_verify_migration_false(self, client, mocked):
r = client.verify_migration(1, 2, 'fake_wwn')
self.assertFalse(r)
@res_mock.patch_client
def test_cleanup_migration(self, client, mocked):
client.cleanup_migration(1, 2)
@res_mock.patch_client
def test_cleanup_migration_not_migrating(self, client, mocked):
client.cleanup_migration(1, 2)
@res_mock.patch_client
def test_cleanup_migration_cancel_failed(self, client, mocked):
client.cleanup_migration(1, 2)
@res_mock.patch_client
def test_get_lun_by_name(self, client, mocked):
lun = client.get_lun(name='lun_name_test_get_lun_by_name')
self.assertEqual(888, lun.lun_id)
@res_mock.patch_client
def test_delete_lun(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_smp(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_lun_not_exist(self, client, mocked):
client.delete_lun(mocked['lun'].name)
@res_mock.patch_client
def test_delete_lun_exception(self, client, mocked):
self.assertRaisesRegexp(storops_ex.VNXDeleteLunError,
'General lun delete error.',
client.delete_lun, mocked['lun'].name)
@res_mock.patch_client
def test_cleanup_async_lun(self, client, mocked):
client.cleanup_async_lun(
mocked['lun'].name,
force=True)
@res_mock.patch_client
def test_enable_compression(self, client, mocked):
lun_obj = mocked['lun']
client.enable_compression(lun_obj)
lun_obj.enable_compression.assert_called_with(ignore_thresholds=True)
@res_mock.patch_client
def test_enable_compression_on_compressed_lun(self, client, mocked):
lun_obj = mocked['lun']
client.enable_compression(lun_obj)
@res_mock.patch_client
def test_get_vnx_enabler_status(self, client, mocked):
re = client.get_vnx_enabler_status()
self.assertTrue(re.dedup_enabled)
self.assertFalse(re.compression_enabled)
self.assertTrue(re.thin_enabled)
self.assertFalse(re.fast_enabled)
self.assertTrue(re.snap_enabled)
@res_mock.patch_client
def test_lun_has_snapshot_true(self, client, mocked):
re = client.lun_has_snapshot(mocked['lun'])
self.assertTrue(re)
@res_mock.patch_client
def test_lun_has_snapshot_false(self, client, mocked):
re = client.lun_has_snapshot(mocked['lun'])
self.assertFalse(re)
@res_mock.patch_client
def test_create_cg(self, client, mocked):
cg = client.create_consistency_group('cg_name')
self.assertIsNotNone(cg)
@res_mock.patch_client
def test_create_cg_already_existed(self, client, mocked):
cg = client.create_consistency_group('cg_name_already_existed')
self.assertIsNotNone(cg)
@res_mock.patch_client
def test_delete_cg(self, client, mocked):
client.delete_consistency_group('deleted_name')
@res_mock.patch_client
def test_delete_cg_not_existed(self, client, mocked):
client.delete_consistency_group('not_existed')
@res_mock.patch_client
def test_expand_lun(self, client, _ignore):
client.expand_lun('lun', 10, poll=True)
@res_mock.patch_client
def test_expand_lun_not_poll(self, client, _ignore):
client.expand_lun('lun', 10, poll=False)
@res_mock.patch_client
def test_expand_lun_already_expanded(self, client, _ignore):
client.expand_lun('lun', 10)
@unittest.skip("Skip until bug #1578986 is fixed")
@utils.patch_sleep
@res_mock.patch_client
def test_expand_lun_not_ops_ready(self, client, _ignore, sleep_mock):
self.assertRaises(storops_ex.VNXLunPreparingError,
client.expand_lun, 'lun', 10)
lun = client.vnx.get_lun()
lun.expand.assert_called_once_with(10, ignore_thresholds=True)
# Called twice
lun.expand.assert_called_once_with(10, ignore_thresholds=True)
@res_mock.patch_client
def test_create_snapshot(self, client, _ignore):
client.create_snapshot('lun_test_create_snapshot',
'snap_test_create_snapshot')
lun = client.vnx.get_lun()
lun.create_snap.assert_called_once_with('snap_test_create_snapshot',
allow_rw=True,
auto_delete=False,
keep_for=None)
@res_mock.patch_client
def test_create_snapshot_snap_name_exist_error(self, client, _ignore):
client.create_snapshot('lun_name', 'snapshot_name')
@res_mock.patch_client
def test_delete_snapshot(self, client, _ignore):
client.delete_snapshot('snapshot_name')
@res_mock.patch_client
def test_delete_snapshot_delete_attached_error(self, client, _ignore):
self.assertRaises(storops_ex.VNXDeleteAttachedSnapError,
client.delete_snapshot, 'snapshot_name')
@res_mock.patch_client
def test_copy_snapshot(self, client, mocked):
client.copy_snapshot('old_name', 'new_name')
@res_mock.patch_client
def test_create_mount_point(self, client, mocked):
client.create_mount_point('lun_name', 'smp_name')
@res_mock.patch_client
def test_attach_mount_point(self, client, mocked):
client.attach_snapshot('smp_name', 'snap_name')
@res_mock.patch_client
def test_detach_mount_point(self, client, mocked):
client.detach_snapshot('smp_name')
@res_mock.patch_client
def test_modify_snapshot(self, client, mocked):
client.modify_snapshot('snap_name', True, True)
@res_mock.patch_client
def test_create_cg_snapshot(self, client, mocked):
snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
self.assertIsNotNone(snap)
@res_mock.patch_client
def test_create_cg_snapshot_already_existed(self, client, mocked):
snap = client.create_cg_snapshot('cg_snap_name', 'cg_name')
self.assertIsNotNone(snap)
@res_mock.patch_client
def test_delete_cg_snapshot(self, client, mocked):
client.delete_cg_snapshot(cg_snap_name='test_snap')
@res_mock.patch_client
def test_create_sg(self, client, mocked):
client.create_storage_group('sg_name')
@res_mock.patch_client
def test_create_sg_name_in_use(self, client, mocked):
client.create_storage_group('sg_name')
self.assertIsNotNone(client.sg_cache['sg_name'])
self.assertTrue(client.sg_cache['sg_name'].existed)
@res_mock.patch_client
def test_get_storage_group(self, client, mocked):
sg = client.get_storage_group('sg_name')
self.assertEqual('sg_name', sg.name)
@res_mock.patch_client
def test_register_initiator(self, client, mocked):
host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
client.register_initiator(mocked['sg'], host,
{'host_initiator': 'port_1'})
@res_mock.patch_client
def test_register_initiator_exception(self, client, mocked):
host = vnx_common.Host('host_name', ['host_initiator'], 'host_ip')
client.register_initiator(mocked['sg'], host,
{'host_initiator': 'port_1'})
@res_mock.patch_client
def test_ping_node(self, client, mocked):
self.assertTrue(client.ping_node(mocked['iscsi_port'], 'ip'))
@res_mock.patch_client
def test_ping_node_fail(self, client, mocked):
self.assertFalse(client.ping_node(mocked['iscsi_port'], 'ip'))
@res_mock.patch_client
def test_add_lun_to_sg(self, client, mocked):
lun = 'not_care'
self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))
@res_mock.patch_client
def test_add_lun_to_sg_alu_already_attached(self, client, mocked):
lun = 'not_care'
self.assertEqual(1, client.add_lun_to_sg(mocked['sg'], lun, 3))
@res_mock.patch_client
def test_add_lun_to_sg_alu_in_use(self, client, mocked):
self.assertRaisesRegexp(storops_ex.VNXNoHluAvailableError,
'No HLU available.',
client.add_lun_to_sg,
mocked['sg'],
mocked['lun'],
3)
@res_mock.patch_client
def test_update_consistencygroup_no_lun_in_cg(self, client, mocked):
lun_1 = mocked['lun_1']
lun_2 = mocked['lun_2']
def _get_lun(lun_id):
return [x for x in (lun_1, lun_2) if x.lun_id == lun_id][0]
client.get_lun = _get_lun
cg = mocked['cg']
client.update_consistencygroup(cg, [lun_1.lun_id, lun_2.lun_id], [])
cg.replace_member.assert_called_once_with(lun_1, lun_2)
@res_mock.patch_client
def test_update_consistencygroup_lun_in_cg(self, client, mocked):
lun_1 = mocked['lun_1']
lun_2 = mocked['lun_2']
def _get_lun(lun_id):
return [x for x in (lun_1, lun_2) if x.lun_id == lun_id][0]
client.get_lun = _get_lun
cg = mocked['cg']
client.update_consistencygroup(cg, [lun_2.lun_id], [lun_1.lun_id])
cg.replace_member.assert_called_once_with(lun_2)
@res_mock.patch_client
def test_update_consistencygroup_remove_all(self, client, mocked):
lun_1 = mocked['lun_1']
def _get_lun(lun_id):
return [x for x in (lun_1,) if x.lun_id == lun_id][0]
client.get_lun = _get_lun
cg = mocked['cg']
client.update_consistencygroup(cg, [], [lun_1.lun_id])
cg.delete_member.assert_called_once_with(lun_1)
@res_mock.patch_client
def test_get_available_ip(self, client, mocked):
ip = client.get_available_ip()
self.assertEqual('192.168.1.5', ip)
@res_mock.patch_client
def test_create_mirror(self, client, mocked):
mv = client.create_mirror('test_mirror_name', 11)
self.assertIsNotNone(mv)
@res_mock.patch_client
def test_create_mirror_already_created(self, client, mocked):
mv = client.create_mirror('error_mirror', 12)
self.assertIsNotNone(mv)
@res_mock.patch_client
def test_delete_mirror(self, client, mocked):
client.delete_mirror('mirror_name')
@res_mock.patch_client
def test_delete_mirror_already_deleted(self, client, mocked):
client.delete_mirror('mirror_name_deleted')
@res_mock.patch_client
def test_add_image(self, client, mocked):
client.add_image('mirror_namex', '192.168.1.11', 31)
@res_mock.patch_client
def test_remove_image(self, client, mocked):
client.remove_image('mirror_remove')
@res_mock.patch_client
def test_fracture_image(self, client, mocked):
client.fracture_image('mirror_fracture')
@res_mock.patch_client
def test_sync_image(self, client, mocked):
client.sync_image('mirror_sync')
@res_mock.patch_client
def test_promote_image(self, client, mocked):
client.promote_image('mirror_promote')
@res_mock.mock_driver_input
@res_mock.patch_client
def test_get_lun_id(self, client, mocked, cinder_input):
lun_id = client.get_lun_id(cinder_input['volume'])
self.assertEqual(1, lun_id)
@res_mock.mock_driver_input
@res_mock.patch_client
def test_get_lun_id_without_provider_location(self, client, mocked,
cinder_input):
lun_id = client.get_lun_id(cinder_input['volume'])
self.assertIsInstance(lun_id, int)
self.assertEqual(mocked['lun'].lun_id, lun_id)
| 37.34238 | 77 | 0.661598 |
138a8dd84efbfea24e6297d36330d4e235e9ce20 | 267 | py | Python | tests/examine_model.py | eeris-nilm/eeris_nilm | 57e17970234a22c355d6e037bcb58fc3a67dc1f1 | ["Apache-2.0"] | 8 | 2020-06-17T16:31:43.000Z | 2021-07-01T02:25:27.000Z | tests/examine_model.py | eeris-nilm/eeris_nilm | 57e17970234a22c355d6e037bcb58fc3a67dc1f1 | ["Apache-2.0"] | 1 | 2021-04-29T12:59:12.000Z | 2021-04-29T12:59:12.000Z | tests/examine_model.py | eeris-nilm/eeris_nilm | 57e17970234a22c355d6e037bcb58fc3a67dc1f1 | ["Apache-2.0"] | null | null | null |
import dill
import pymongo
from eeris_nilm.algorithms import livehart
mclient = pymongo.MongoClient("mongodb://localhost:27017")
mdb = mclient['eeris']
inst_doc = mdb.models.find_one({"meterId": "5e05d5c83e442d4f78db036f"})
model = dill.loads(inst_doc['modelHart'])
| 29.666667 | 71 | 0.782772 |
c4ec0e9259a4f03ae940a5f00ed55ffdbe18a231 | 27 | py | Python | src/__init__.py | ScienceStacks/FittingSurface | 7994995c7155817ea4334f10dcd21e691cee46da | ["MIT"] | null | null | null | src/__init__.py | ScienceStacks/FittingSurface | 7994995c7155817ea4334f10dcd21e691cee46da | ["MIT"] | null | null | null | src/__init__.py | ScienceStacks/FittingSurface | 7994995c7155817ea4334f10dcd21e691cee46da | ["MIT"] | null | null | null |
import src.constants as cn
| 13.5 | 26 | 0.814815 |
07da09049c546bf94182e8d05a5f2b4335513b38 | 202277 | py | Python | tensorflow/python/keras/backend.py | alanpurple/tensorflow | ddf538bf79cc5cf575cad2aa5b3b8a6995e378b6 | ["Apache-2.0"] | 4 | 2020-06-28T08:25:36.000Z | 2021-08-12T12:41:34.000Z | tensorflow/python/keras/backend.py | alanpurple/tensorflow | ddf538bf79cc5cf575cad2aa5b3b8a6995e378b6 | ["Apache-2.0"] | 2 | 2021-08-25T16:12:24.000Z | 2022-02-10T02:04:13.000Z | tensorflow/python/keras/backend.py | alanpurple/tensorflow | ddf538bf79cc5cf575cad2aa5b3b8a6995e378b6 | ["Apache-2.0"] | 4 | 2019-11-28T12:18:07.000Z | 2021-08-01T16:12:17.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
# pylint: disable=redefined-outer-name
# pylint: disable=redefined-builtin
"""Keras backend API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import json
import os
import sys
import threading
import weakref
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import tf2
from tensorflow.python.client import session as session_module
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.eager import function as eager_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device_spec
from tensorflow.python.framework import dtypes as dtypes_module
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.engine import keras_tensor
from tensorflow.python.keras.utils import control_flow_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import ctc_ops as ctc
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients as gradients_module
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import map_fn as map_fn_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables as variables_module
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import moving_averages
from tensorflow.python.training.tracking import util as tracking_util
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import keras_export
py_all = all
py_sum = sum
py_any = any
# INTERNAL UTILS
# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
# This is thread-local to allow building separate models in different threads
# concurrently, but comes at the cost of not being able to build one model
# across threads.
_GRAPH = threading.local()
# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = threading.local()
# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
_SESSION = threading.local()
# _DUMMY_EAGER_GRAPH.key is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
# _DummyEagerGraph inherits from threading.local to make its `key` attribute
# thread local. This is needed to make set_learning_phase affect only the
# current thread during eager execution (see b/123096885 for more details).
class _DummyEagerGraph(threading.local):
"""_DummyEagerGraph provides a thread local `key` attribute.
We can't use threading.local directly, i.e. without subclassing, because
gevent monkey patches threading.local and its version does not support
weak references.
"""
class _WeakReferencableClass(object):
"""This dummy class is needed for two reasons.
- We need something that supports weak references. Basic types like string
and ints don't.
- We need something whose hash and equality are based on object identity
to make sure they are treated as different keys to _GRAPH_LEARNING_PHASES.
An empty Python class satisfies both of these requirements.
"""
pass
def __init__(self):
# Constructors for classes subclassing threading.local run once
# per thread accessing something in the class. Thus, each thread will
# get a different key.
super(_DummyEagerGraph, self).__init__()
self.key = _DummyEagerGraph._WeakReferencableClass()
self.learning_phase_is_set = False
_DUMMY_EAGER_GRAPH = _DummyEagerGraph()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False
# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None
# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format
@keras_export('keras.backend.backend')
def backend():
"""Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns:
The string "tensorflow".
"""
return 'tensorflow'
@keras_export('keras.backend.cast_to_floatx')
@dispatch.add_dispatch_support
def cast_to_floatx(x):
"""Cast a Numpy array to the default Keras float type.
Arguments:
x: Numpy array or TensorFlow tensor.
Returns:
The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor
if `x` was a tensor), cast to its new type.
Example:
>>> tf.keras.backend.floatx()
'float32'
>>> arr = np.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = cast_to_floatx(arr)
>>> new_arr
array([1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
"""
if isinstance(x, (ops.Tensor,
variables_module.Variable,
sparse_tensor.SparseTensor)):
return math_ops.cast(x, dtype=floatx())
return np.asarray(x, dtype=floatx())
# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows to give unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()
@keras_export('keras.backend.get_uid')
def get_uid(prefix=''):
"""Associates a string prefix with an integer counter in a TensorFlow graph.
Arguments:
prefix: String prefix to index.
Returns:
Unique integer ID.
Example:
>>> get_uid('dense')
1
>>> get_uid('dense')
2
"""
graph = get_graph()
if graph not in PER_GRAPH_OBJECT_NAME_UIDS:
PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int)
layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph]
layer_name_uids[prefix] += 1
return layer_name_uids[prefix]
@keras_export('keras.backend.reset_uids')
def reset_uids():
"""Resets graph identifiers.
"""
PER_GRAPH_OBJECT_NAME_UIDS.clear()
@keras_export('keras.backend.clear_session')
def clear_session():
"""Resets all state generated by Keras.
Keras manages a global state, which it uses to implement the Functional
model-building API and to uniquify autogenerated layer names.
If you are creating many models in a loop, this global state will consume
an increasing amount of memory over time, and you may want to clear it.
Calling `clear_session()` releases the global state: this helps avoid clutter
from old models and layers, especially when memory is limited.
Example 1: calling `clear_session()` when creating models in a loop
```python
for _ in range(100):
# Without `clear_session()`, each iteration of this loop will
# slightly increase the size of the global state managed by Keras
model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
for _ in range(100):
# With `clear_session()` called at the beginning,
# Keras starts with a blank state at each iteration
# and memory consumption is constant over time.
tf.keras.backend.clear_session()
model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
```
Example 2: resetting the layer name generation counter
>>> import tensorflow as tf
>>> layers = [tf.keras.layers.Dense(10) for _ in range(10)]
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense_10
>>> tf.keras.backend.set_learning_phase(1)
>>> print(tf.keras.backend.learning_phase())
1
>>> tf.keras.backend.clear_session()
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense
"""
global _SESSION
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
global _GRAPH_VARIABLES # pylint: disable=global-variable-not-assigned
global _GRAPH_TF_OPTIMIZERS # pylint: disable=global-variable-not-assigned
global _GRAPH
_GRAPH.graph = None
ops.reset_default_graph()
reset_uids()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
_DUMMY_EAGER_GRAPH.learning_phase_is_set = False
_GRAPH_LEARNING_PHASES.clear()
# Create the learning phase placeholder in graph using the default factory.
_GRAPH_LEARNING_PHASES.setdefault(graph)
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
@keras_export('keras.backend.manual_variable_initialization')
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.compat.v1.initialize_all_variables()`).
Arguments:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
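# Editor's note: a minimal, hypothetical sketch (not part of the original
# module; assumes TF1 graph mode, e.g. after
# `tf.compat.v1.disable_eager_execution()`) of what enabling manual
# initialization implies: `get_session()` no longer initializes variables, so
# the caller must run the initializer explicitly.
def _manual_variable_initialization_sketch():  # pragma: no cover - illustration
  manual_variable_initialization(True)
  v = variable(np.zeros((2, 2)))
  sess = get_session()
  sess.run(variables_module.variables_initializer([v]))
  return v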
@keras_export('keras.backend.learning_phase')
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
graph = ops.get_default_graph()
if graph is getattr(_GRAPH, 'graph', None):
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
learning_phase = symbolic_learning_phase()
else:
with ops.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the learning
# phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
learning_phase = _GRAPH_LEARNING_PHASES[None]
_mark_func_graph_as_unsaveable(graph, learning_phase)
return learning_phase
def global_learning_phase_is_set():
return _DUMMY_EAGER_GRAPH.learning_phase_is_set
def _mark_func_graph_as_unsaveable(graph, learning_phase):
"""Mark func graph as unsaveable due to use of symbolic keras learning phase.
Functions that capture the symbolic learning phase cannot be exported to
SavedModel. Mark the funcgraph as unsaveable, so that an error will be raised
if it is exported.
Args:
graph: Graph or FuncGraph object.
learning_phase: Learning phase placeholder or int defined in the graph.
"""
if graph.building_function and is_placeholder(learning_phase):
graph.mark_as_unsaveable(
'The keras learning phase placeholder was used inside a function. '
'Exporting placeholders is not supported when saving out a SavedModel. '
'Please call `tf.keras.backend.set_learning_phase(0)` in the function '
'to set the learning phase to a constant value.')
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
return _GRAPH_LEARNING_PHASES[graph]
def _default_learning_phase():
if context.executing_eagerly():
return 0
else:
with name_scope(''):
return array_ops.placeholder_with_default(
False, shape=(), name='keras_learning_phase')
@deprecated('2020-10-11',
'Simply pass a True/False value to the `training` argument '
'of the `__call__` method of your layer or model.')
@keras_export('keras.backend.set_learning_phase')
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
The backend learning phase affects any code that calls
`backend.learning_phase()`
In particular, all Keras built-in layers use the learning phase as the default
for the `training` arg to `Layer.__call__`.
User-written layers and models can achieve the same behavior with code that
looks like:
```python
def call(self, inputs, training=None):
if training is None:
training = backend.learning_phase()
```
Arguments:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
deprecated_internal_set_learning_phase(value)
def deprecated_internal_set_learning_phase(value):
"""A deprecated internal implementation of set_learning_phase.
This method is an internal-only version of `set_learning_phase` that
does not raise a deprecation error. It is required because
saved_model needs to keep working with user code that uses the deprecated
learning phase methods until those apis are fully removed from the public api.
Specifically SavedModel saving needs to make sure the learning phase is 0
during tracing even if users overwrote it to a different value.
But, we don't want to raise deprecation warnings for users when savedmodel
sets learning phase just for compatibility with code that relied on
explicitly setting the learning phase for other values.
Arguments:
value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
# In an eager context, the learning phase values applies to both the eager
# context and the internal Keras graph.
_DUMMY_EAGER_GRAPH.learning_phase_is_set = True
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value
_GRAPH_LEARNING_PHASES[get_graph()] = value
@deprecated('2020-10-11',
'Simply pass a True/False value to the `training` argument '
'of the `__call__` method of your layer or model.')
@keras_export('keras.backend.learning_phase_scope')
@tf_contextlib.contextmanager
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the scope.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
with deprecated_internal_learning_phase_scope(value):
try:
yield
finally:
pass
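# Editor's note: an illustrative, hypothetical usage sketch (not part of the
# original module): temporarily force "train" behaviour for code that still
# reads the global learning phase instead of accepting a `training` argument.
# Note that `learning_phase_scope` emits a deprecation warning in TF 2.x.
def _learning_phase_scope_sketch(model, data):  # pragma: no cover - illustration
  with learning_phase_scope(1):  # 1 == train, 0 == test
    return model(data)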
@tf_contextlib.contextmanager
def deprecated_internal_learning_phase_scope(value):
"""An internal-only version of `learning_phase_scope`.
Unlike the public method, this method does not raise a deprecation warning.
This is needed because saved model saving needs to set learning phase
to maintain compatibility
with code that sets/gets the learning phase, but saved model
saving itself shouldn't raise a deprecation warning.
We can get rid of this method and its usages when the public api is
removed.
Arguments:
value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
if value not in {0, 1}:
raise ValueError('Expected learning phase to be 0 or 1.')
with ops.init_scope():
if context.executing_eagerly():
previous_eager_value = _GRAPH_LEARNING_PHASES.get(
_DUMMY_EAGER_GRAPH.key, None)
previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None)
learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set
try:
deprecated_internal_set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
if not learning_phase_previously_set:
_DUMMY_EAGER_GRAPH.learning_phase_is_set = False
with ops.init_scope():
if context.executing_eagerly():
if previous_eager_value is not None:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value
elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]
graph = get_graph()
if previous_graph_value is not None:
_GRAPH_LEARNING_PHASES[graph] = previous_graph_value
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
"""Internal scope that sets the learning phase in eager / tf.function only.
Arguments:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES # pylint: disable=global-variable-not-assigned
assert value in {0, 1}
assert ops.executing_eagerly_outside_functions()
global_learning_phase_was_set = global_learning_phase_is_set()
if global_learning_phase_was_set:
previous_value = learning_phase()
try:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = value
yield
finally:
# Restore learning phase to initial value or unset.
if global_learning_phase_was_set:
_GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_value
else:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]
def _current_graph(op_input_list):
"""Return the graph members of `op_input_list`, or the current graph."""
return ops._get_graph_from_inputs(op_input_list)
def _get_session(op_input_list=()):
"""Returns the session object for the current thread."""
global _SESSION
default_session = ops.get_default_session()
if default_session is not None:
session = default_session
else:
if ops.inside_function():
raise RuntimeError('Cannot get session inside Tensorflow graph function.')
# If we don't have a session, or that session does not match the current
# graph, create and cache a new session.
if (getattr(_SESSION, 'session', None) is None or
_SESSION.session.graph is not _current_graph(op_input_list)):
# If we are creating the Session inside a tf.distribute.Strategy scope,
# we ask the strategy for the right session options to use.
if distribution_strategy_context.has_strategy():
configure_and_create_distributed_session(
distribution_strategy_context.get_strategy())
else:
_SESSION.session = session_module.Session(
config=get_default_session_config())
session = _SESSION.session
return session
@keras_export(v1=['keras.backend.get_session'])
def get_session(op_input_list=()):
"""Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches
the current graph.
If no global Keras session exists at this point:
we will create a new global session.
Note that you can manually set the global session
via `K.set_session(sess)`.
Arguments:
      op_input_list: An optional sequence of tensors or ops, which will be used
to determine the current graph. Otherwise the default graph will be
used.
Returns:
A TensorFlow session.
"""
session = _get_session(op_input_list)
if not _MANUAL_VAR_INIT:
with session.graph.as_default():
_initialize_variables(session)
return session
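# Editor's note: an illustrative, hypothetical sketch (TF1/graph mode only,
# not part of the original module): the returned session can be used to
# evaluate a symbolic tensor built in the Keras graph.
def _get_session_sketch(symbolic_tensor):  # pragma: no cover - illustration
  sess = get_session()
  return sess.run(symbolic_tensor)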
# Inject the get_session function to tracking_util to avoid the backward
# dependency from TF to Keras.
tracking_util.register_session_provider(get_session)
def get_graph():
if context.executing_eagerly():
global _GRAPH
if not getattr(_GRAPH, 'graph', None):
_GRAPH.graph = func_graph.FuncGraph('keras_graph')
return _GRAPH.graph
else:
return ops.get_default_graph()
@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
"""Retrieve a shared and temporary func graph.
The eager execution path lifts a subgraph from the keras global graph into
a scratch graph in order to create a function. DistributionStrategies, in
turn, constructs multiple functions as well as a final combined function. In
order for that logic to work correctly, all of the functions need to be
created on the same scratch FuncGraph.
Args:
graph: A graph to be used as the current scratch graph. If not set then
      a scratch graph will either be retrieved or created.
Yields:
The current scratch graph.
"""
global _CURRENT_SCRATCH_GRAPH
scratch_graph = getattr(_CURRENT_SCRATCH_GRAPH, 'graph', None)
# If scratch graph and `graph` are both configured, they must match.
if (scratch_graph is not None and graph is not None and
scratch_graph is not graph):
raise ValueError('Multiple scratch graphs specified.')
if scratch_graph:
yield scratch_graph
return
graph = graph or func_graph.FuncGraph('keras_scratch_graph')
try:
_CURRENT_SCRATCH_GRAPH.graph = graph
yield graph
finally:
_CURRENT_SCRATCH_GRAPH.graph = None
@keras_export(v1=['keras.backend.set_session'])
def set_session(session):
"""Sets the global TensorFlow session.
Arguments:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if os.environ.get('OMP_NUM_THREADS'):
logging.warning(
'OMP_NUM_THREADS is no longer used by the default Keras config. '
'To configure the number of threads, use tf.config.threading APIs.')
config = context.context().config
config.allow_soft_placement = True
return config
def get_default_graph_uid_map():
graph = ops.get_default_graph()
name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None)
if name_uid_map is None:
name_uid_map = collections.defaultdict(int)
PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map
return name_uid_map
# DEVICE MANIPULATION
class _TfDeviceCaptureOp(object):
"""Class for capturing the TF device scope."""
def __init__(self):
self.device = None
def _set_device(self, device):
"""This method captures TF's explicit device scope setting."""
if isinstance(device, device_spec.DeviceSpecV2):
device = device.to_string()
self.device = device
def _set_device_from_string(self, device_str):
self.device = device_str
def _get_current_tf_device():
"""Return explicit device of current context, otherwise returns `None`.
Returns:
If the current device scope is explicitly set, it returns a string with
the device (`CPU` or `GPU`). If the scope is not explicitly set, it will
return `None`.
"""
graph = get_graph()
op = _TfDeviceCaptureOp()
graph._apply_device_functions(op)
if tf2.enabled():
return device_spec.DeviceSpecV2.from_string(op.device)
else:
return device_spec.DeviceSpecV1.from_string(op.device)
def _is_current_explicit_device(device_type):
"""Check if the current device is explicitly set on the device type specified.
Arguments:
device_type: A string containing `GPU` or `CPU` (case-insensitive).
Returns:
A boolean indicating if the current device scope is explicitly set on the
device type.
Raises:
ValueError: If the `device_type` string indicates an unsupported device.
"""
device_type = device_type.upper()
if device_type not in ['CPU', 'GPU']:
raise ValueError('`device_type` should be either "CPU" or "GPU".')
device = _get_current_tf_device()
return device is not None and device.device_type == device_type.upper()
def _get_available_gpus():
"""Get a list of available gpu devices (formatted as strings).
Returns:
A list of available GPU devices.
"""
if ops.executing_eagerly_outside_functions():
# Returns names of devices directly.
return [d.name for d in config.list_logical_devices('GPU')]
global _LOCAL_DEVICES
if _LOCAL_DEVICES is None:
_LOCAL_DEVICES = get_session().list_devices()
return [x.name for x in _LOCAL_DEVICES if x.device_type == 'GPU']
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
  TensorFlow does not support NCHW on CPU. Therefore we check whether we are
  not explicitly placed on the CPU and GPUs are available; in that case ops
  will be soft-placed on the GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device('CPU')
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
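# Editor's note: an illustrative, hypothetical sketch (not part of the
# original module): convolution code can use this check to pick a compute
# layout string.
def _nchw_layout_sketch():  # pragma: no cover - illustration
  return 'NCHW' if _has_nchw_support() else 'NHWC'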
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
This is slightly faster than the _to_tensor function, at the cost of
handling fewer cases.
Arguments:
x: An object to be converted (numpy arrays, floats, ints and lists of
them).
dtype: The destination type.
Returns:
A tensor.
"""
return constant_op.constant(x, dtype=dtype)
def _to_tensor(x, dtype):
"""Convert the input `x` to a tensor of type `dtype`.
Arguments:
x: An object to be converted (numpy array, list, tensors).
dtype: The destination type.
Returns:
A tensor.
"""
return ops.convert_to_tensor_v2(x, dtype=dtype)
@keras_export('keras.backend.is_sparse')
def is_sparse(tensor):
"""Returns whether a tensor is a sparse tensor.
Arguments:
tensor: A tensor instance.
Returns:
A boolean.
Example:
>>> a = tf.keras.backend.placeholder((2, 2), sparse=False)
>>> print(tf.keras.backend.is_sparse(a))
False
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
"""
spec = getattr(tensor, '_type_spec', None)
if spec is not None:
return isinstance(spec, sparse_tensor.SparseTensorSpec)
return isinstance(tensor, sparse_tensor.SparseTensor)
@keras_export('keras.backend.to_dense')
@dispatch.add_dispatch_support
def to_dense(tensor):
"""Converts a sparse tensor into a dense tensor and returns it.
Arguments:
tensor: A tensor instance (potentially sparse).
Returns:
A dense tensor.
Examples:
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
>>> c = tf.keras.backend.to_dense(b)
>>> print(tf.keras.backend.is_sparse(c))
False
"""
if is_sparse(tensor):
return sparse_ops.sparse_tensor_to_dense(tensor)
else:
return tensor
@keras_export('keras.backend.name_scope', v1=[])
def name_scope(name):
"""A context manager for use when defining a Python op.
This context manager pushes a name scope, which will make the name of all
operations added within it have a prefix.
For example, to define a new Python op called `my_op`:
def my_op(a):
with tf.name_scope("MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
# Define some computation that uses `a`.
return foo_op(..., name=scope)
When executed, the Tensor `a` will have the name `MyOp/a`.
Args:
name: The prefix to use on all names created within the name scope.
Returns:
Name scope context manager.
"""
return ops.name_scope_v2(name)
# Export V1 version.
keras_export(v1=['keras.backend.name_scope'])(ops.name_scope_v1)
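# Editor's note: a runnable version of the `my_op` pattern from the
# `name_scope` docstring above (editor-added illustration, not part of the
# original module; `square` stands in for the hypothetical `foo_op`).
def _my_op_sketch(a):  # pragma: no cover - illustration
  with name_scope('MyOp'):
    a = ops.convert_to_tensor_v2(a, name='a')
    # Ops created inside the scope get names prefixed with "MyOp/".
    return math_ops.square(a, name='Square')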
@keras_export('keras.backend.variable')
def variable(value, dtype=None, name=None, constraint=None):
"""Instantiates a variable and returns it.
Arguments:
value: Numpy array, initial value of the tensor.
dtype: Tensor type.
name: Optional name string for the tensor.
constraint: Optional projection function to be
applied to the variable after an optimizer update.
Returns:
A variable instance (with Keras metadata included).
Examples:
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val, dtype='float64',
... name='example_var')
>>> tf.keras.backend.dtype(kvar)
'float64'
>>> print(kvar)
<tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=
array([[1., 2.],
[3., 4.]])>
"""
if dtype is None:
dtype = floatx()
if hasattr(value, 'tocoo'):
sparse_coo = value.tocoo()
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
sparse_coo.col, 1)), 1)
v = sparse_tensor.SparseTensor(
indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
v._keras_shape = sparse_coo.shape
return v
v = variables_module.Variable(
value,
dtype=dtypes_module.as_dtype(dtype),
name=name,
constraint=constraint)
if isinstance(value, np.ndarray):
v._keras_shape = value.shape
elif hasattr(value, 'shape'):
v._keras_shape = int_shape(value)
track_variable(v)
return v
def track_tf_optimizer(tf_optimizer):
"""Tracks the given TF optimizer for initialization of its variables."""
if context.executing_eagerly():
return
optimizers = _GRAPH_TF_OPTIMIZERS[None]
optimizers.add(tf_optimizer)
def track_variable(v):
"""Tracks the given variable for initialization."""
if context.executing_eagerly():
return
graph = v.graph if hasattr(v, 'graph') else get_graph()
_GRAPH_VARIABLES[graph].add(v)
def unique_object_name(name,
name_uid_map=None,
avoid_names=None,
namespace='',
zero_based=False):
"""Makes a object name (or arbitrary string) unique within a TensorFlow graph.
Arguments:
name: String name to make unique.
name_uid_map: An optional defaultdict(int) to use when creating unique
names. If None (default), uses a per-Graph dictionary.
avoid_names: An optional set or dict with names which should not be used. If
None (default) does not avoid any names.
namespace: Gets a name which is unique within the (graph, namespace). Layers
which are not Networks use a blank namespace and so get graph-global
names.
zero_based: If True, name sequences start with no suffix (e.g. "dense",
"dense_1"). If False, naming is one-based ("dense_1", "dense_2").
Returns:
Unique string name.
Example:
unique_object_name('dense') # dense_1
unique_object_name('dense') # dense_2
"""
if name_uid_map is None:
name_uid_map = get_default_graph_uid_map()
if avoid_names is None:
avoid_names = set()
proposed_name = None
while proposed_name is None or proposed_name in avoid_names:
name_key = (namespace, name)
if zero_based:
number = name_uid_map[name_key]
if number:
proposed_name = name + '_' + str(number)
else:
proposed_name = name
name_uid_map[name_key] += 1
else:
name_uid_map[name_key] += 1
proposed_name = name + '_' + str(name_uid_map[name_key])
return proposed_name
def _get_variables(graph=None):
"""Returns variables corresponding to the given graph for initialization."""
assert not context.executing_eagerly()
variables = _GRAPH_VARIABLES[graph]
for opt in _GRAPH_TF_OPTIMIZERS[graph]:
variables.update(opt.optimizer.variables())
return variables
def _initialize_variables(session):
"""Utility to initialize uninitialized variables on the fly."""
variables = _get_variables(get_graph())
candidate_vars = []
for v in variables:
if not getattr(v, '_keras_initialized', False):
candidate_vars.append(v)
if candidate_vars:
# This step is expensive, so we only run it on variables not already
# marked as initialized.
is_initialized = session.run(
[variables_module.is_variable_initialized(v) for v in candidate_vars])
# TODO(kathywu): Some metric variables loaded from SavedModel are never
# actually used, and do not have an initializer.
should_be_initialized = [
(not is_initialized[n]) and v.initializer is not None
for n, v in enumerate(candidate_vars)]
uninitialized_vars = []
for flag, v in zip(should_be_initialized, candidate_vars):
if flag:
uninitialized_vars.append(v)
v._keras_initialized = True
if uninitialized_vars:
session.run(variables_module.variables_initializer(uninitialized_vars))
@keras_export('keras.backend.constant')
@dispatch.add_dispatch_support
def constant(value, dtype=None, shape=None, name=None):
"""Creates a constant tensor.
Arguments:
value: A constant value (or list)
dtype: The type of the elements of the resulting tensor.
shape: Optional dimensions of resulting tensor.
name: Optional name for the tensor.
Returns:
A Constant Tensor.
"""
if dtype is None:
dtype = floatx()
return constant_op.constant(value, dtype=dtype, shape=shape, name=name)
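# Editor's note: an illustrative, hypothetical sketch (not part of the
# original module): unlike `tf.constant`, which would infer an integer dtype
# from this value, `constant` defaults to the Keras floatx dtype.
def _constant_sketch():  # pragma: no cover - illustration
  c = constant([1, 2, 3], shape=(1, 3))
  return c.dtype  # tf.float32 when floatx() == 'float32'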
@keras_export('keras.backend.is_keras_tensor')
def is_keras_tensor(x):
"""Returns whether `x` is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer,
(`Layer` class) or by `Input`.
Arguments:
x: A candidate tensor.
Returns:
A boolean: Whether the argument is a Keras tensor.
Raises:
ValueError: In case `x` is not a symbolic tensor.
Examples:
>>> np_var = np.array([1, 2])
>>> # A numpy array is not a symbolic tensor.
>>> tf.keras.backend.is_keras_tensor(np_var)
Traceback (most recent call last):
...
ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`.
Expected a symbolic tensor instance.
>>> keras_var = tf.keras.backend.variable(np_var)
>>> # A variable created with the keras backend is not a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_var)
False
>>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> # A placeholder is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_placeholder)
True
>>> keras_input = tf.keras.layers.Input([10])
>>> # An Input is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_input)
True
>>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input)
>>> # Any Keras layer output is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_layer_output)
True
"""
if not isinstance(x,
(ops.Tensor, variables_module.Variable,
sparse_tensor.SparseTensor, ragged_tensor.RaggedTensor,
keras_tensor.KerasTensor)):
raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) +
'`. Expected a symbolic tensor instance.')
if keras_tensor.keras_tensors_enabled():
return isinstance(x, keras_tensor.KerasTensor)
return hasattr(x, '_keras_history')
@keras_export('keras.backend.placeholder')
def placeholder(shape=None,
ndim=None,
dtype=None,
sparse=False,
name=None,
ragged=False):
"""Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
ragged: Boolean, whether the placeholder should have a ragged type.
In this case, values of 'None' in the 'shape' argument represent
ragged dimensions. For more information about RaggedTensors, see this
[guide](https://www.tensorflow.org/guide/ragged_tensors).
Raises:
ValueError: If called with eager execution
ValueError: If called with sparse = True and ragged = True.
Returns:
Tensor instance (with Keras metadata included).
Examples:
>>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_...' shape=(2, 4, 5) dtype=float32>
"""
if sparse and ragged:
raise ValueError(
'Cannot set both sparse and ragged to True when creating a placeholder.'
)
if dtype is None:
dtype = floatx()
if not shape:
if ndim:
shape = (None,) * ndim
if keras_tensor.keras_tensors_enabled():
spec = tensor_spec.TensorSpec(
shape=shape, dtype=dtype, name=name)
if sparse:
spec = sparse_tensor.SparseTensorSpec(
shape=shape, dtype=dtype)
elif ragged:
ragged_rank = 0
for i in range(1, len(shape)):
        # `shape` may be a tuple (with None entries) or a TensorShape whose
        # entries are Dimensions with a `.value` of None, so check both forms.
if shape[i] is None or (
hasattr(shape[i], 'value') and
shape[i].value is None):
ragged_rank = i
spec = ragged_tensor.RaggedTensorSpec(
shape=shape, dtype=dtype, ragged_rank=ragged_rank)
x = keras_tensor.KerasTensor(spec, name=name)
else:
with get_graph().as_default():
if sparse:
x = array_ops.sparse_placeholder(dtype, shape=shape, name=name)
elif ragged:
ragged_rank = 0
for i in range(1, len(shape)):
if shape[i] is None:
ragged_rank = i
type_spec = ragged_tensor.RaggedTensorSpec(
shape=shape, dtype=dtype, ragged_rank=ragged_rank)
def tensor_spec_to_placeholder(tensorspec):
return array_ops.placeholder(tensorspec.dtype, tensorspec.shape)
x = nest.map_structure(tensor_spec_to_placeholder, type_spec,
expand_composites=True)
else:
x = array_ops.placeholder(dtype, shape=shape, name=name)
if context.executing_eagerly():
# Add keras_history connectivity information to the placeholder
# when the placeholder is built in a top-level eager context
# (intended to be used with keras.backend.function)
from tensorflow.python.keras.engine import input_layer # pylint: disable=g-import-not-at-top
x = input_layer.Input(tensor=x)
if keras_tensor.keras_tensors_enabled():
x._is_backend_placeholder = True
return x
def is_placeholder(x):
"""Returns whether `x` is a placeholder.
Arguments:
x: A candidate placeholder.
Returns:
Boolean.
"""
try:
if keras_tensor.keras_tensors_enabled():
return hasattr(x, '_is_backend_placeholder')
if isinstance(x, composite_tensor.CompositeTensor):
flat_components = nest.flatten(x, expand_composites=True)
return py_any(is_placeholder(c) for c in flat_components)
else:
return x.op.type == 'Placeholder'
except AttributeError:
return False
@keras_export('keras.backend.shape')
@dispatch.add_dispatch_support
def shape(x):
"""Returns the symbolic shape of a tensor or variable.
Arguments:
x: A tensor or variable.
Returns:
A symbolic shape (which is itself a tensor).
Examples:
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.shape(kvar)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.shape(input)
<tf.Tensor 'Shape_...' shape=(3,) dtype=int32>
"""
return array_ops.shape(x)
@keras_export('keras.backend.int_shape')
def int_shape(x):
"""Returns the shape of tensor or variable as a tuple of int or None entries.
Arguments:
x: Tensor or variable.
Returns:
A tuple of integers (or None entries).
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.int_shape(kvar)
(2, 2)
"""
try:
shape = x.shape
if not isinstance(shape, tuple):
shape = tuple(shape.as_list())
return shape
except ValueError:
return None
@keras_export('keras.backend.ndim')
def ndim(x):
"""Returns the number of axes in a tensor, as an integer.
Arguments:
x: Tensor or variable.
Returns:
Integer (scalar), number of axes.
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.ndim(input)
3
>>> tf.keras.backend.ndim(kvar)
2
"""
dims = x.shape._dims
if dims is not None:
return len(dims)
return None
@keras_export('keras.backend.dtype')
@dispatch.add_dispatch_support
def dtype(x):
"""Returns the dtype of a Keras tensor or variable, as a string.
Arguments:
x: Tensor or variable.
Returns:
String, dtype of `x`.
Examples:
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5)))
'float32'
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
... dtype='float32'))
'float32'
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
... dtype='float64'))
'float64'
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]))
>>> tf.keras.backend.dtype(kvar)
'float32'
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
... dtype='float32')
>>> tf.keras.backend.dtype(kvar)
'float32'
"""
return x.dtype.base_dtype.name
@keras_export('keras.backend.eval')
def eval(x):
"""Evaluates the value of a variable.
Arguments:
x: A variable.
Returns:
A Numpy array.
Examples:
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
... dtype='float32')
>>> tf.keras.backend.eval(kvar)
array([[1., 2.],
[3., 4.]], dtype=float32)
"""
return get_value(to_dense(x))
@keras_export('keras.backend.zeros')
def zeros(shape, dtype=None, name=None):
"""Instantiates an all-zeros variable and returns it.
Arguments:
shape: Tuple or list of integers, shape of returned Keras variable
dtype: data type of returned Keras variable
name: name of returned Keras variable
Returns:
A variable (including Keras metadata), filled with `0.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
>>> kvar = tf.keras.backend.zeros((3,4))
>>> tf.keras.backend.eval(kvar)
array([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
>>> A = tf.constant([1,2,3])
>>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.]
>>> tf.keras.backend.eval(kvar2)
array([0., 0., 0.], dtype=float32)
>>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32)
>>> tf.keras.backend.eval(kvar3)
array([0, 0, 0], dtype=int32)
>>> kvar4 = tf.keras.backend.zeros([2,3])
>>> tf.keras.backend.eval(kvar4)
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.zeros(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
return v
@keras_export('keras.backend.ones')
@dispatch.add_dispatch_support
def ones(shape, dtype=None, name=None):
"""Instantiates an all-ones variable and returns it.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, filled with `1.0`.
Note that if `shape` was symbolic, we cannot return a variable,
and will return a dynamically-shaped tensor instead.
Example:
>>> kvar = tf.keras.backend.ones((3,4))
>>> tf.keras.backend.eval(kvar)
array([[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]], dtype=float32)
"""
with ops.init_scope():
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
v = array_ops.ones(shape=shape, dtype=tf_dtype, name=name)
if py_all(v.shape.as_list()):
return variable(v, dtype=dtype, name=name)
return v
@keras_export('keras.backend.eye')
@dispatch.add_dispatch_support
def eye(size, dtype=None, name=None):
"""Instantiate an identity matrix and returns it.
Arguments:
size: Integer, number of rows/columns.
dtype: String, data type of returned Keras variable.
name: String, name of returned Keras variable.
Returns:
A Keras variable, an identity matrix.
Example:
>>> kvar = tf.keras.backend.eye(3)
>>> tf.keras.backend.eval(kvar)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=float32)
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
return variable(linalg_ops.eye(size, dtype=tf_dtype), dtype, name)
@keras_export('keras.backend.zeros_like')
def zeros_like(x, dtype=None, name=None):
"""Instantiates an all-zeros variable of the same shape as another tensor.
Arguments:
x: Keras variable or Keras tensor.
dtype: dtype of returned Keras variable.
`None` uses the dtype of `x`.
name: name for the variable to create.
Returns:
A Keras variable with the shape of `x` filled with zeros.
Example:
from tensorflow.keras import backend as K
kvar = K.variable(np.random.random((2,3)))
kvar_zeros = K.zeros_like(kvar)
K.eval(kvar_zeros)
# array([[ 0., 0., 0.], [ 0., 0., 0.]], dtype=float32)
"""
return array_ops.zeros_like(x, dtype=dtype, name=name)
@keras_export('keras.backend.ones_like')
@dispatch.add_dispatch_support
def ones_like(x, dtype=None, name=None):
"""Instantiates an all-ones variable of the same shape as another tensor.
Arguments:
x: Keras variable or tensor.
dtype: String, dtype of returned Keras variable.
None uses the dtype of x.
name: String, name for the variable to create.
Returns:
A Keras variable with the shape of x filled with ones.
Example:
>>> kvar = tf.keras.backend.variable(np.random.random((2,3)))
>>> kvar_ones = tf.keras.backend.ones_like(kvar)
>>> tf.keras.backend.eval(kvar_ones)
array([[1., 1., 1.],
[1., 1., 1.]], dtype=float32)
"""
return array_ops.ones_like(x, dtype=dtype, name=name)
def identity(x, name=None):
"""Returns a tensor with the same content as the input tensor.
Arguments:
x: The input tensor.
name: String, name for the variable to create.
Returns:
A tensor of the same shape, type and content.
"""
return array_ops.identity(x, name=name)
@keras_export('keras.backend.random_uniform_variable')
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None):
"""Instantiates a variable with values drawn from a uniform distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
low: Float, lower boundary of the output interval.
high: Float, upper boundary of the output interval.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
>>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3),
... low=0.0, high=1.0)
>>> kvar
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_uniform_initializer(
low, high, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.random_normal_variable')
def random_normal_variable(shape, mean, scale, dtype=None, name=None,
seed=None):
"""Instantiates a variable with values drawn from a normal distribution.
Arguments:
shape: Tuple of integers, shape of returned Keras variable.
mean: Float, mean of the normal distribution.
scale: Float, standard deviation of the normal distribution.
dtype: String, dtype of returned Keras variable.
name: String, name of returned Keras variable.
seed: Integer, random seed.
Returns:
A Keras variable, filled with drawn samples.
Example:
>>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3),
... mean=0.0, scale=1.0)
>>> kvar
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
tf_dtype = dtypes_module.as_dtype(dtype)
if seed is None:
# ensure that randomness is conditioned by the Numpy RNG
seed = np.random.randint(10e8)
value = init_ops.random_normal_initializer(
mean, scale, dtype=tf_dtype, seed=seed)(shape)
return variable(value, dtype=dtype, name=name)
@keras_export('keras.backend.count_params')
def count_params(x):
"""Returns the static number of elements in a variable or tensor.
Arguments:
x: Variable or tensor.
Returns:
Integer, the number of scalars in `x`.
Example:
>>> kvar = tf.keras.backend.zeros((2,3))
>>> tf.keras.backend.count_params(kvar)
6
>>> tf.keras.backend.eval(kvar)
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
"""
return np.prod(x.shape.as_list())
@keras_export('keras.backend.cast')
@dispatch.add_dispatch_support
def cast(x, dtype):
"""Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Arguments:
x: Keras tensor (or variable).
dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).
Returns:
Keras tensor with dtype `dtype`.
Examples:
Cast a float32 variable to a float64 tensor
>>> input = tf.keras.backend.ones(shape=(1,3))
>>> print(input)
<tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,
numpy=array([[1., 1., 1.]], dtype=float32)>
>>> cast_input = tf.keras.backend.cast(input, dtype='float64')
>>> print(cast_input)
tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
"""
return math_ops.cast(x, dtype)
# UPDATES OPS
@keras_export('keras.backend.update')
def update(x, new_x):
  """Update the value of `x` to `new_x`.
  Arguments:
      x: A Variable.
      new_x: A tensor of same shape as `x`.
  Returns:
      The variable `x` updated.
  """
  return state_ops.assign(x, new_x)
@keras_export('keras.backend.update_add')
def update_add(x, increment):
"""Update the value of `x` by adding `increment`.
Arguments:
x: A Variable.
increment: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_add(x, increment)
@keras_export('keras.backend.update_sub')
def update_sub(x, decrement):
"""Update the value of `x` by subtracting `decrement`.
Arguments:
x: A Variable.
decrement: A tensor of same shape as `x`.
Returns:
The variable `x` updated.
"""
return state_ops.assign_sub(x, decrement)
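# Editor's note: an illustrative, hypothetical sketch (eager mode assumed, not
# part of the original module): the three update helpers are thin wrappers
# around `assign`, `assign_add` and `assign_sub`.
def _update_ops_sketch():  # pragma: no cover - illustration
  v = variables_module.Variable(10.0)
  update(v, 3.0)      # v is now 3.0
  update_add(v, 2.0)  # v is now 5.0
  update_sub(v, 1.0)  # v is now 4.0
  return v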
@keras_export('keras.backend.moving_average_update')
def moving_average_update(x, value, momentum):
"""Compute the exponential moving average of a value.
The moving average 'x' is updated with 'value' following:
```
x = x * momentum + value * (1 - momentum)
```
For example:
>>> x = tf.Variable(0.0)
  >>> momentum = 0.9
  >>> _ = moving_average_update(x, value=2.0, momentum=momentum)
>>> x.numpy()
0.2
The result will be biased towards the initial value of the variable.
If the variable was initialized to zero, you can divide by
`1 - momentum ** num_updates` to debias it (Section 3 of
[Kingma et al., 2015](https://arxiv.org/abs/1412.6980)):
>>> num_updates = 1.0
>>> x_zdb = x/(1 - momentum**num_updates)
>>> x_zdb.numpy()
2.0
Arguments:
x: A Variable, the moving average.
value: A tensor with the same shape as `x`, the new value to be
averaged in.
momentum: The moving average momentum.
Returns:
The updated variable.
"""
zero_debias = not tf2.enabled()
return moving_averages.assign_moving_average(
x, value, momentum, zero_debias=zero_debias)
# LINEAR ALGEBRA
@keras_export('keras.backend.dot')
@dispatch.add_dispatch_support
def dot(x, y):
"""Multiplies 2 tensors (and/or variables) and returns a tensor.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor, dot product of `x` and `y`.
Examples:
>>> x = tf.keras.backend.placeholder(shape=(2, 3))
>>> y = tf.keras.backend.placeholder(shape=(3, 4))
>>> xy = tf.keras.backend.dot(x, y)
>>> xy
<tf.Tensor ... shape=(2, 4) dtype=float32>
>>> x = tf.keras.backend.placeholder(shape=(32, 28, 3))
>>> y = tf.keras.backend.placeholder(shape=(3, 4))
>>> xy = tf.keras.backend.dot(x, y)
>>> xy
<tf.Tensor ... shape=(32, 28, 4) dtype=float32>
>>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = tf.keras.backend.ones((4, 3, 5))
>>> xy = tf.keras.backend.dot(x, y)
>>> tf.keras.backend.int_shape(xy)
(2, 4, 5)
"""
if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2):
x_shape = []
for i, s in zip(int_shape(x), array_ops.unstack(array_ops.shape(x))):
if i is not None:
x_shape.append(i)
else:
x_shape.append(s)
x_shape = tuple(x_shape)
y_shape = []
for i, s in zip(int_shape(y), array_ops.unstack(array_ops.shape(y))):
if i is not None:
y_shape.append(i)
else:
y_shape.append(s)
y_shape = tuple(y_shape)
y_permute_dim = list(range(ndim(y)))
y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim
xt = array_ops.reshape(x, [-1, x_shape[-1]])
yt = array_ops.reshape(
array_ops.transpose(y, perm=y_permute_dim), [y_shape[-2], -1])
return array_ops.reshape(
math_ops.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:])
if is_sparse(x):
out = sparse_ops.sparse_tensor_dense_matmul(x, y)
else:
out = math_ops.matmul(x, y)
return out
@keras_export('keras.backend.batch_dot')
@dispatch.add_dispatch_support
def batch_dot(x, y, axes=None):
"""Batchwise dot product.
`batch_dot` is used to compute dot product of `x` and `y` when
`x` and `y` are data in batch, i.e. in a shape of
`(batch_size, :)`.
  `batch_dot` results in a tensor or variable with fewer dimensions
than the input. If the number of dimensions is reduced to 1,
we use `expand_dims` to make sure that ndim is at least 2.
Arguments:
x: Keras tensor or variable with `ndim >= 2`.
y: Keras tensor or variable with `ndim >= 2`.
axes: Tuple or list of integers with target dimensions, or single integer.
The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]` should be equal.
Returns:
A tensor with shape equal to the concatenation of `x`'s shape
(less the dimension that was summed over) and `y`'s shape
(less the batch dimension and the dimension that was summed over).
If the final rank is 1, we reshape it to `(batch_size, 1)`.
Examples:
>>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))
>>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))
>>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))
>>> tf.keras.backend.int_shape(xy_batch_dot)
(32, 1, 30)
Shape inference:
Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
If `axes` is (1, 2), to find the output shape of resultant tensor,
loop through each dimension in `x`'s shape and `y`'s shape:
* `x.shape[0]` : 100 : append to output shape
* `x.shape[1]` : 20 : do not append to output shape,
dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
* `y.shape[0]` : 100 : do not append to output shape,
always ignore first dimension of `y`
* `y.shape[1]` : 30 : append to output shape
* `y.shape[2]` : 20 : do not append to output shape,
dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
`output_shape` = `(100, 30)`
"""
x_shape = int_shape(x)
y_shape = int_shape(y)
x_ndim = len(x_shape)
y_ndim = len(y_shape)
if x_ndim < 2 or y_ndim < 2:
raise ValueError('Cannot do batch_dot on inputs '
'with rank < 2. '
'Received inputs with shapes ' +
str(x_shape) + ' and ' +
str(y_shape) + '.')
x_batch_size = x_shape[0]
y_batch_size = y_shape[0]
if x_batch_size is not None and y_batch_size is not None:
if x_batch_size != y_batch_size:
raise ValueError('Cannot do batch_dot on inputs '
'with different batch sizes. '
'Received inputs with shapes ' +
str(x_shape) + ' and ' +
str(y_shape) + '.')
if isinstance(axes, int):
axes = [axes, axes]
if axes is None:
if y_ndim == 2:
axes = [x_ndim - 1, y_ndim - 1]
else:
axes = [x_ndim - 1, y_ndim - 2]
if py_any(isinstance(a, (list, tuple)) for a in axes):
raise ValueError('Multiple target dimensions are not supported. ' +
'Expected: None, int, (int, int), ' +
'Provided: ' + str(axes))
# if tuple, convert to list.
axes = list(axes)
# convert negative indices.
if axes[0] < 0:
axes[0] += x_ndim
if axes[1] < 0:
axes[1] += y_ndim
# sanity checks
if 0 in axes:
raise ValueError('Cannot perform batch_dot over axis 0. '
'If your inputs are not batched, '
'add a dummy batch dimension to your '
'inputs using K.expand_dims(x, 0)')
a0, a1 = axes
d1 = x_shape[a0]
d2 = y_shape[a1]
if d1 is not None and d2 is not None and d1 != d2:
raise ValueError('Cannot do batch_dot on inputs with shapes ' +
str(x_shape) + ' and ' + str(y_shape) +
' with axes=' + str(axes) + '. x.shape[%d] != '
'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2))
# backup ndims. Need them later.
orig_x_ndim = x_ndim
orig_y_ndim = y_ndim
# if rank is 2, expand to 3.
if x_ndim == 2:
x = array_ops.expand_dims(x, 1)
a0 += 1
x_ndim += 1
if y_ndim == 2:
y = array_ops.expand_dims(y, 2)
y_ndim += 1
# bring x's dimension to be reduced to last axis.
if a0 != x_ndim - 1:
pattern = list(range(x_ndim))
for i in range(a0, x_ndim - 1):
pattern[i] = pattern[i + 1]
pattern[-1] = a0
x = array_ops.transpose(x, pattern)
# bring y's dimension to be reduced to axis 1.
if a1 != 1:
pattern = list(range(y_ndim))
for i in range(a1, 1, -1):
pattern[i] = pattern[i - 1]
pattern[1] = a1
y = array_ops.transpose(y, pattern)
# normalize both inputs to rank 3.
if x_ndim > 3:
# squash middle dimensions of x.
x_shape = shape(x)
x_mid_dims = x_shape[1:-1]
x_squashed_shape = array_ops.stack(
[x_shape[0], -1, x_shape[-1]])
x = array_ops.reshape(x, x_squashed_shape)
x_squashed = True
else:
x_squashed = False
if y_ndim > 3:
# squash trailing dimensions of y.
y_shape = shape(y)
y_trail_dims = y_shape[2:]
y_squashed_shape = array_ops.stack(
[y_shape[0], y_shape[1], -1])
y = array_ops.reshape(y, y_squashed_shape)
y_squashed = True
else:
y_squashed = False
result = math_ops.matmul(x, y)
# if inputs were squashed, we have to reshape the matmul output.
output_shape = array_ops.shape(result)
do_reshape = False
if x_squashed:
output_shape = array_ops.concat(
[output_shape[:1],
x_mid_dims,
output_shape[-1:]], 0)
do_reshape = True
if y_squashed:
output_shape = array_ops.concat([output_shape[:-1], y_trail_dims], 0)
do_reshape = True
if do_reshape:
result = array_ops.reshape(result, output_shape)
# if the inputs were originally rank 2, we remove the added 1 dim.
if orig_x_ndim == 2:
result = array_ops.squeeze(result, 1)
elif orig_y_ndim == 2:
result = array_ops.squeeze(result, -1)
return result
@keras_export('keras.backend.transpose')
@dispatch.add_dispatch_support
def transpose(x):
"""Transposes a tensor and returns it.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
Examples:
>>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
>>> tf.keras.backend.eval(var)
array([[1., 2., 3.],
[4., 5., 6.]], dtype=float32)
>>> var_transposed = tf.keras.backend.transpose(var)
>>> tf.keras.backend.eval(var_transposed)
array([[1., 4.],
[2., 5.],
[3., 6.]], dtype=float32)
>>> input = tf.keras.backend.placeholder((2, 3))
>>> input
<tf.Tensor 'Placeholder_...' shape=(2, 3) dtype=float32>
>>> input_transposed = tf.keras.backend.transpose(input)
>>> input_transposed
<tf.Tensor 'Transpose_...' shape=(3, 2) dtype=float32>
"""
return array_ops.transpose(x)
@keras_export('keras.backend.gather')
@dispatch.add_dispatch_support
def gather(reference, indices):
"""Retrieves the elements of indices `indices` in the tensor `reference`.
Arguments:
reference: A tensor.
indices: An integer tensor of indices.
Returns:
A tensor of same type as `reference`.
Examples:
>>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
>>> tf.keras.backend.eval(var)
array([[1., 2., 3.],
[4., 5., 6.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [0])
>>> tf.keras.backend.eval(var_gathered)
array([[1., 2., 3.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [1])
>>> tf.keras.backend.eval(var_gathered)
array([[4., 5., 6.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [0,1,0])
>>> tf.keras.backend.eval(var_gathered)
array([[1., 2., 3.],
[4., 5., 6.],
[1., 2., 3.]], dtype=float32)
"""
return array_ops.gather(reference, indices)
# ELEMENT-WISE OPERATIONS
@keras_export('keras.backend.max')
@dispatch.add_dispatch_support
def max(x, axis=None, keepdims=False):
"""Maximum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find maximum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with maximum values of `x`.
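  Example (a minimal sketch, assuming eager execution; the exact numpy
  reprs may differ slightly between TensorFlow versions):
  >>> x = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.keras.backend.eval(tf.keras.backend.max(x))
  6
  >>> tf.keras.backend.eval(tf.keras.backend.max(x, axis=1))
  array([3, 6], dtype=int32)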
"""
return math_ops.reduce_max(x, axis, keepdims)
@keras_export('keras.backend.min')
@dispatch.add_dispatch_support
def min(x, axis=None, keepdims=False):
"""Minimum value in a tensor.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to find minimum values.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with minimum values of `x`.
"""
return math_ops.reduce_min(x, axis, keepdims)
@keras_export('keras.backend.sum')
@dispatch.add_dispatch_support
def sum(x, axis=None, keepdims=False):
"""Sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to sum over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with sum of `x`.
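  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([[1, 2], [3, 4]])
  >>> tf.keras.backend.eval(tf.keras.backend.sum(x))
  10
  >>> tf.keras.backend.eval(tf.keras.backend.sum(x, axis=0))
  array([4, 6], dtype=int32)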
"""
return math_ops.reduce_sum(x, axis, keepdims)
@keras_export('keras.backend.prod')
@dispatch.add_dispatch_support
def prod(x, axis=None, keepdims=False):
"""Multiplies the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the product of elements of `x`.
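  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.keras.backend.eval(tf.keras.backend.prod(x, axis=1))
  array([ 2., 12.], dtype=float32)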
"""
return math_ops.reduce_prod(x, axis, keepdims)
@keras_export('keras.backend.cumsum')
@dispatch.add_dispatch_support
def cumsum(x, axis=0):
"""Cumulative sum of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the sum.
Returns:
A tensor of the cumulative sum of values of `x` along `axis`.
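  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([1, 2, 3])
  >>> tf.keras.backend.eval(tf.keras.backend.cumsum(x))
  array([1, 3, 6], dtype=int32)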
"""
return math_ops.cumsum(x, axis=axis)
@keras_export('keras.backend.cumprod')
@dispatch.add_dispatch_support
def cumprod(x, axis=0):
"""Cumulative product of the values in a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the product.
Returns:
A tensor of the cumulative product of values of `x` along `axis`.
"""
return math_ops.cumprod(x, axis=axis)
@keras_export('keras.backend.var')
def var(x, axis=None, keepdims=False):
"""Variance of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to compute the variance.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`,
the reduced dimension is retained with length 1.
Returns:
A tensor with the variance of elements of `x`.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_variance(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.std')
@dispatch.add_dispatch_support
def std(x, axis=None, keepdims=False):
"""Standard deviation of a tensor, alongside the specified axis.
It is an alias to `tf.math.reduce_std`.
Arguments:
x: A tensor or variable. It should have numerical dtypes. Boolean type
inputs will be converted to float.
axis: An integer, the axis to compute the standard deviation. If `None`
(the default), reduces all dimensions. Must be in the range
`[-rank(x), rank(x))`.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is retained with
length 1.
Returns:
A tensor with the standard deviation of elements of `x` with same dtype.
Boolean type input will be converted to float.
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_std(x, axis=axis, keepdims=keepdims)
@keras_export('keras.backend.mean')
@dispatch.add_dispatch_support
def mean(x, axis=None, keepdims=False):
"""Mean of a tensor, alongside the specified axis.
Arguments:
x: A tensor or variable.
      axis: An integer or a list of integers, the axis or axes along which to
          compute the mean. If `None` (the default), the mean is computed over
          all dimensions.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1 for each entry in `axis`. If `keepdims` is `True`,
the reduced dimensions are retained with length 1.
Returns:
A tensor with the mean of elements of `x`.
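  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.keras.backend.eval(tf.keras.backend.mean(x))
  2.5
  >>> tf.keras.backend.eval(tf.keras.backend.mean(x, axis=0))
  array([2., 3.], dtype=float32)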
"""
if x.dtype.base_dtype == dtypes_module.bool:
x = math_ops.cast(x, floatx())
return math_ops.reduce_mean(x, axis, keepdims)
@keras_export('keras.backend.any')
@dispatch.add_dispatch_support
def any(x, axis=None, keepdims=False):
"""Bitwise reduction (logical OR).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
Returns:
      A bool tensor (`True`/`False` values).
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_any(x, axis, keepdims)
@keras_export('keras.backend.all')
@dispatch.add_dispatch_support
def all(x, axis=None, keepdims=False):
"""Bitwise reduction (logical AND).
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
      keepdims: whether to drop or broadcast the reduction axes.
Returns:
      A bool tensor (`True`/`False` values).
"""
x = math_ops.cast(x, dtypes_module.bool)
return math_ops.reduce_all(x, axis, keepdims)
@keras_export('keras.backend.argmax')
@dispatch.add_dispatch_support
def argmax(x, axis=-1):
"""Returns the index of the maximum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
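  Example (a minimal sketch, assuming eager execution; the returned indices
  have dtype int64):
  >>> x = tf.constant([[1, 9, 2], [3, 1, 7]])
  >>> tf.keras.backend.eval(tf.keras.backend.argmax(x, axis=-1))
  array([1, 2])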
"""
return math_ops.argmax(x, axis)
@keras_export('keras.backend.argmin')
@dispatch.add_dispatch_support
def argmin(x, axis=-1):
"""Returns the index of the minimum value along an axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
"""
return math_ops.argmin(x, axis)
@keras_export('keras.backend.square')
@dispatch.add_dispatch_support
def square(x):
"""Element-wise square.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.square(x)
@keras_export('keras.backend.abs')
@dispatch.add_dispatch_support
def abs(x):
"""Element-wise absolute value.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.abs(x)
@keras_export('keras.backend.sqrt')
@dispatch.add_dispatch_support
def sqrt(x):
"""Element-wise square root.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
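  Example (a minimal sketch, assuming eager execution; note how the
  negative entry is clipped to 0):
  >>> x = tf.constant([-4., 0., 4.])
  >>> tf.keras.backend.eval(tf.keras.backend.sqrt(x))
  array([0., 0., 2.], dtype=float32)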
"""
zero = _constant_to_tensor(0., x.dtype.base_dtype)
inf = _constant_to_tensor(np.inf, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, inf)
return math_ops.sqrt(x)
@keras_export('keras.backend.exp')
@dispatch.add_dispatch_support
def exp(x):
"""Element-wise exponential.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.exp(x)
@keras_export('keras.backend.log')
@dispatch.add_dispatch_support
def log(x):
"""Element-wise log.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.log(x)
def logsumexp(x, axis=None, keepdims=False):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
This function is more numerically stable than log(sum(exp(x))).
It avoids overflows caused by taking the exp of large inputs and
underflows caused by taking the log of small inputs.
Arguments:
x: A tensor or variable.
axis: An integer, the axis to reduce over.
keepdims: A boolean, whether to keep the dimensions or not.
If `keepdims` is `False`, the rank of the tensor is reduced
by 1. If `keepdims` is `True`, the reduced dimension is
retained with length 1.
Returns:
The reduced tensor.
"""
return math_ops.reduce_logsumexp(x, axis, keepdims)
@keras_export('keras.backend.round')
@dispatch.add_dispatch_support
def round(x):
"""Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.round(x)
@keras_export('keras.backend.sign')
@dispatch.add_dispatch_support
def sign(x):
"""Element-wise sign.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sign(x)
@keras_export('keras.backend.pow')
@dispatch.add_dispatch_support
def pow(x, a):
"""Element-wise exponentiation.
Arguments:
x: Tensor or variable.
a: Python integer.
Returns:
A tensor.
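  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([1., 2., 3.])
  >>> tf.keras.backend.eval(tf.keras.backend.pow(x, 2))
  array([1., 4., 9.], dtype=float32)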
"""
return math_ops.pow(x, a)
@keras_export('keras.backend.clip')
@dispatch.add_dispatch_support
def clip(x, min_value, max_value):
"""Element-wise value clipping.
Arguments:
x: Tensor or variable.
min_value: Python float, integer, or tensor.
max_value: Python float, integer, or tensor.
Returns:
A tensor.
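  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([-2, 5, 10])
  >>> tf.keras.backend.eval(tf.keras.backend.clip(x, 0, 6))
  array([0, 5, 6], dtype=int32)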
"""
if (isinstance(min_value, (int, float)) and
isinstance(max_value, (int, float))):
if max_value < min_value:
max_value = min_value
if min_value is None:
min_value = -np.inf
if max_value is None:
max_value = np.inf
return clip_ops.clip_by_value(x, min_value, max_value)
@keras_export('keras.backend.equal')
@dispatch.add_dispatch_support
def equal(x, y):
"""Element-wise equality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.equal(x, y)
@keras_export('keras.backend.not_equal')
@dispatch.add_dispatch_support
def not_equal(x, y):
"""Element-wise inequality between two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.not_equal(x, y)
@keras_export('keras.backend.greater')
@dispatch.add_dispatch_support
def greater(x, y):
"""Element-wise truth value of (x > y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater(x, y)
@keras_export('keras.backend.greater_equal')
@dispatch.add_dispatch_support
def greater_equal(x, y):
"""Element-wise truth value of (x >= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.greater_equal(x, y)
@keras_export('keras.backend.less')
@dispatch.add_dispatch_support
def less(x, y):
"""Element-wise truth value of (x < y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less(x, y)
@keras_export('keras.backend.less_equal')
@dispatch.add_dispatch_support
def less_equal(x, y):
"""Element-wise truth value of (x <= y).
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A bool tensor.
"""
return math_ops.less_equal(x, y)
@keras_export('keras.backend.maximum')
@dispatch.add_dispatch_support
def maximum(x, y):
"""Element-wise maximum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor with the element wise maximum value(s) of `x` and `y`.
Examples:
>>> x = tf.Variable([[1, 2], [3, 4]])
>>> y = tf.Variable([[2, 1], [0, -1]])
>>> m = tf.keras.backend.maximum(x, y)
>>> m
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[2, 2],
[3, 4]], dtype=int32)>
"""
return math_ops.maximum(x, y)
@keras_export('keras.backend.minimum')
@dispatch.add_dispatch_support
def minimum(x, y):
"""Element-wise minimum of two tensors.
Arguments:
x: Tensor or variable.
y: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.minimum(x, y)
@keras_export('keras.backend.sin')
@dispatch.add_dispatch_support
def sin(x):
"""Computes sin of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.sin(x)
@keras_export('keras.backend.cos')
@dispatch.add_dispatch_support
def cos(x):
"""Computes cos of x element-wise.
Arguments:
x: Tensor or variable.
Returns:
A tensor.
"""
return math_ops.cos(x)
def _regular_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
normed = nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
return normed, mean, var
def _broadcast_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Non-fused, broadcast version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
mean, var = nn.moments(x, reduction_axes, None, None, False)
target_shape = []
for axis in range(ndim(x)):
if axis in reduction_axes:
target_shape.append(1)
else:
target_shape.append(array_ops.shape(x)[axis])
target_shape = array_ops.stack(target_shape)
broadcast_mean = array_ops.reshape(mean, target_shape)
broadcast_var = array_ops.reshape(var, target_shape)
if gamma is None:
broadcast_gamma = None
else:
broadcast_gamma = array_ops.reshape(gamma, target_shape)
if beta is None:
broadcast_beta = None
else:
broadcast_beta = array_ops.reshape(beta, target_shape)
normed = nn.batch_normalization(x, broadcast_mean, broadcast_var,
broadcast_beta, broadcast_gamma, epsilon)
return normed, mean, var
def _fused_normalize_batch_in_training(x,
gamma,
beta,
reduction_axes,
epsilon=1e-3):
"""Fused version of `normalize_batch_in_training`.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if list(reduction_axes) == [0, 1, 2]:
normalization_axis = 3
tf_data_format = 'NHWC'
else:
normalization_axis = 1
tf_data_format = 'NCHW'
if gamma is None:
gamma = constant_op.constant(
1.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
if beta is None:
beta = constant_op.constant(
0.0, dtype=x.dtype, shape=[x.shape[normalization_axis]])
return nn.fused_batch_norm(
x, gamma, beta, epsilon=epsilon, data_format=tf_data_format)
@keras_export('keras.backend.normalize_batch_in_training')
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3):
"""Computes mean and std for batch then apply batch_normalization on batch.
Arguments:
x: Input tensor or variable.
gamma: Tensor by which to scale the input.
beta: Tensor with which to center the input.
reduction_axes: iterable of integers,
axes over which to normalize.
epsilon: Fuzz factor.
Returns:
A tuple length of 3, `(normalized_tensor, mean, variance)`.
"""
if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]:
if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
return _fused_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
if sorted(reduction_axes) == list(range(ndim(x)))[:-1]:
return _regular_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
else:
return _broadcast_normalize_batch_in_training(
x, gamma, beta, reduction_axes, epsilon=epsilon)
@keras_export('keras.backend.batch_normalization')
@dispatch.add_dispatch_support
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
"""Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
  `output = (x - mean) / sqrt(var + epsilon) * gamma + beta`
Arguments:
x: Input tensor or variable.
mean: Mean of batch.
var: Variance of batch.
beta: Tensor with which to center the input.
gamma: Tensor by which to scale the input.
axis: Integer, the axis that should be normalized.
(typically the features axis).
epsilon: Fuzz factor.
Returns:
A tensor.
"""
if ndim(x) == 4:
# The CPU implementation of `fused_batch_norm` only supports NHWC
if axis == 1 or axis == -3:
tf_data_format = 'NCHW'
elif axis == 3 or axis == -1:
tf_data_format = 'NHWC'
else:
tf_data_format = None
if (tf_data_format == 'NHWC' or
tf_data_format == 'NCHW' and _has_nchw_support()):
# The mean / var / beta / gamma tensors may be broadcasted
# so they may have extra axes of size 1, which should be squeezed.
if ndim(mean) > 1:
mean = array_ops.reshape(mean, [-1])
if ndim(var) > 1:
var = array_ops.reshape(var, [-1])
if beta is None:
beta = zeros_like(mean)
elif ndim(beta) > 1:
beta = array_ops.reshape(beta, [-1])
if gamma is None:
gamma = ones_like(mean)
elif ndim(gamma) > 1:
gamma = array_ops.reshape(gamma, [-1])
y, _, _ = nn.fused_batch_norm(
x,
gamma,
beta,
epsilon=epsilon,
mean=mean,
variance=var,
data_format=tf_data_format,
is_training=False
)
return y
return nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
# SHAPE OPERATIONS
@keras_export('keras.backend.concatenate')
@dispatch.add_dispatch_support
def concatenate(tensors, axis=-1):
"""Concatenates a list of tensors alongside the specified axis.
Arguments:
tensors: list of tensors to concatenate.
axis: concatenation axis.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
>>> tf.keras.backend.concatenate((a, b), axis=-1)
<tf.Tensor: shape=(3, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 10, 20, 30],
[ 4, 5, 6, 40, 50, 60],
[ 7, 8, 9, 70, 80, 90]], dtype=int32)>
"""
if axis < 0:
rank = ndim(tensors[0])
if rank:
axis %= rank
else:
axis = 0
if py_all(is_sparse(x) for x in tensors):
return sparse_ops.sparse_concat(axis, tensors)
elif py_all(isinstance(x, ragged_tensor.RaggedTensor) for x in tensors):
return ragged_concat_ops.concat(tensors, axis)
else:
return array_ops.concat([to_dense(x) for x in tensors], axis)
@keras_export('keras.backend.reshape')
@dispatch.add_dispatch_support
def reshape(x, shape):
"""Reshapes a tensor to the specified shape.
Arguments:
x: Tensor or variable.
shape: Target shape tuple.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
>>> a
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]], dtype=int32)>
>>> tf.keras.backend.reshape(a, shape=(2, 6))
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[ 1, 2, 3, 4, 5, 6],
[ 7, 8, 9, 10, 11, 12]], dtype=int32)>
"""
return array_ops.reshape(x, shape)
@keras_export('keras.backend.permute_dimensions')
@dispatch.add_dispatch_support
def permute_dimensions(x, pattern):
"""Permutes axes in a tensor.
Arguments:
x: Tensor or variable.
pattern: A tuple of
dimension indices, e.g. `(0, 2, 1)`.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
>>> a
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 7, 8, 9],
[10, 11, 12]], dtype=int32)>
>>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[ 1, 4, 7, 10],
[ 2, 5, 8, 11],
[ 3, 6, 9, 12]], dtype=int32)>
"""
return array_ops.transpose(x, perm=pattern)
@keras_export('keras.backend.resize_images')
@dispatch.add_dispatch_support
def resize_images(x, height_factor, width_factor, data_format,
interpolation='nearest'):
"""Resizes the images contained in a 4D tensor.
Arguments:
x: Tensor or variable to resize.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
interpolation: A string, one of `nearest` or `bilinear`.
Returns:
A tensor.
Raises:
ValueError: in case of incorrect value for
`data_format` or `interpolation`.
"""
if data_format == 'channels_first':
rows, cols = 2, 3
elif data_format == 'channels_last':
rows, cols = 1, 2
else:
raise ValueError('Invalid `data_format` argument: %s' % (data_format,))
original_shape = int_shape(x)
new_shape = array_ops.shape(x)[rows:cols + 1]
new_shape *= constant_op.constant(
np.array([height_factor, width_factor], dtype='int32'))
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 2, 3, 1])
if interpolation == 'nearest':
x = image_ops.resize_images_v2(
x, new_shape, method=image_ops.ResizeMethod.NEAREST_NEIGHBOR)
elif interpolation == 'bilinear':
x = image_ops.resize_images_v2(x, new_shape,
method=image_ops.ResizeMethod.BILINEAR)
else:
raise ValueError('interpolation should be one '
'of "nearest" or "bilinear".')
if data_format == 'channels_first':
x = permute_dimensions(x, [0, 3, 1, 2])
if original_shape[rows] is None:
new_height = None
else:
new_height = original_shape[rows] * height_factor
if original_shape[cols] is None:
new_width = None
else:
new_width = original_shape[cols] * width_factor
if data_format == 'channels_first':
output_shape = (None, None, new_height, new_width)
else:
output_shape = (None, new_height, new_width, None)
x.set_shape(output_shape)
return x
@keras_export('keras.backend.resize_volumes')
@dispatch.add_dispatch_support
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format):
"""Resizes the volume contained in a 5D tensor.
Arguments:
x: Tensor or variable to resize.
depth_factor: Positive integer.
height_factor: Positive integer.
width_factor: Positive integer.
data_format: One of `"channels_first"`, `"channels_last"`.
Returns:
A tensor.
Raises:
      ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
"""
if data_format == 'channels_first':
output = repeat_elements(x, depth_factor, axis=2)
output = repeat_elements(output, height_factor, axis=3)
output = repeat_elements(output, width_factor, axis=4)
return output
elif data_format == 'channels_last':
output = repeat_elements(x, depth_factor, axis=1)
output = repeat_elements(output, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
else:
raise ValueError('Invalid data_format: ' + str(data_format))
@keras_export('keras.backend.repeat_elements')
@dispatch.add_dispatch_support
def repeat_elements(x, rep, axis):
"""Repeats the elements of a tensor along an axis, like `np.repeat`.
If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output
will have shape `(s1, s2 * rep, s3)`.
Arguments:
x: Tensor or variable.
rep: Python integer, number of times to repeat.
axis: Axis along which to repeat.
Returns:
A tensor.
Example:
>>> b = tf.constant([1, 2, 3])
>>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)
<tf.Tensor: shape=(6,), dtype=int32,
numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>
"""
x_shape = x.shape.as_list()
# For static axis
if x_shape[axis] is not None:
# slices along the repeat axis
splits = array_ops.split(value=x,
num_or_size_splits=x_shape[axis],
axis=axis)
# repeat each slice the given number of reps
x_rep = [s for s in splits for _ in range(rep)]
return concatenate(x_rep, axis)
# Here we use tf.tile to mimic behavior of np.repeat so that
# we can handle dynamic shapes (that include None).
# To do that, we need an auxiliary axis to repeat elements along
# it and then merge them along the desired axis.
# Repeating
auxiliary_axis = axis + 1
x_shape = array_ops.shape(x)
x_rep = array_ops.expand_dims(x, axis=auxiliary_axis)
reps = np.ones(len(x.shape) + 1)
reps[auxiliary_axis] = rep
x_rep = array_ops.tile(x_rep, reps)
# Merging
reps = np.delete(reps, auxiliary_axis)
reps[axis] = rep
reps = array_ops.constant(reps, dtype='int32')
x_shape *= reps
x_rep = array_ops.reshape(x_rep, x_shape)
# Fix shape representation
x_shape = x.shape.as_list()
x_rep.set_shape(x_shape)
x_rep._keras_shape = tuple(x_shape)
return x_rep
@keras_export('keras.backend.repeat')
@dispatch.add_dispatch_support
def repeat(x, n):
"""Repeats a 2D tensor.
  If `x` has shape `(samples, dim)` and `n` is `2`,
the output will have shape `(samples, 2, dim)`.
Arguments:
x: Tensor or variable.
n: Python integer, number of times to repeat.
Returns:
A tensor.
Example:
>>> b = tf.constant([[1, 2], [3, 4]])
>>> b
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4]], dtype=int32)>
>>> tf.keras.backend.repeat(b, n=2)
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[1, 2],
[1, 2]],
[[3, 4],
[3, 4]]], dtype=int32)>
"""
assert ndim(x) == 2
x = array_ops.expand_dims(x, 1)
pattern = array_ops.stack([1, n, 1])
return array_ops.tile(x, pattern)
@keras_export('keras.backend.arange')
@dispatch.add_dispatch_support
def arange(start, stop=None, step=1, dtype='int32'):
"""Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as
Theano's arange: if only one argument is provided,
it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is `'int32'` to
match TensorFlow's default.
Arguments:
start: Start value.
stop: Stop value.
step: Difference between two successive values.
dtype: Integer dtype to use.
Returns:
An integer tensor.
Example:
>>> tf.keras.backend.arange(start=0, stop=10, step=1.5)
<tf.Tensor: shape=(7,), dtype=float32,
numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>
"""
# Match the behavior of numpy and Theano by returning an empty sequence.
if stop is None and start < 0:
start = 0
result = math_ops.range(start, limit=stop, delta=step, name='arange')
if dtype != 'int32':
result = cast(result, dtype)
return result
@keras_export('keras.backend.tile')
@dispatch.add_dispatch_support
def tile(x, n):
"""Creates a tensor by tiling `x` by `n`.
Arguments:
x: A tensor or variable
      n: A list of integers, or a single integer. If a list, its length must be
          the same as the number of dimensions in `x`.
Returns:
A tiled tensor.
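  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([1, 2, 3])
  >>> tf.keras.backend.eval(tf.keras.backend.tile(x, n=2))
  array([1, 2, 3, 1, 2, 3], dtype=int32)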
"""
if isinstance(n, int):
n = [n]
return array_ops.tile(x, n)
@keras_export('keras.backend.flatten')
@dispatch.add_dispatch_support
def flatten(x):
"""Flatten a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor, reshaped into 1-D
Example:
>>> b = tf.constant([[1, 2], [3, 4]])
>>> b
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[1, 2],
[3, 4]], dtype=int32)>
>>> tf.keras.backend.flatten(b)
<tf.Tensor: shape=(4,), dtype=int32,
numpy=array([1, 2, 3, 4], dtype=int32)>
"""
return array_ops.reshape(x, [-1])
@keras_export('keras.backend.batch_flatten')
@dispatch.add_dispatch_support
def batch_flatten(x):
"""Turn a nD tensor into a 2D tensor with same 0th dimension.
In other words, it flattens each data samples of a batch.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
Examples:
Flattening a 3D tensor to 2D by collapsing the last dimension.
>>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))
>>> x_batch_flatten = batch_flatten(x_batch)
>>> tf.keras.backend.int_shape(x_batch_flatten)
(2, 60)
"""
x = array_ops.reshape(x, array_ops.stack([-1, prod(shape(x)[1:])]))
return x
@keras_export('keras.backend.expand_dims')
@dispatch.add_dispatch_support
def expand_dims(x, axis=-1):
"""Adds a 1-sized dimension at index "axis".
Arguments:
x: A tensor or variable.
axis: Position where to add a new axis.
Returns:
A tensor with expanded dimensions.
"""
return array_ops.expand_dims(x, axis)
@keras_export('keras.backend.squeeze')
@dispatch.add_dispatch_support
def squeeze(x, axis):
"""Removes a 1-dimension from the tensor at index "axis".
Arguments:
x: A tensor or variable.
axis: Axis to drop.
Returns:
A tensor with the same data as `x` but reduced dimensions.
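  Example (a shape-only sketch):
  >>> x = tf.keras.backend.ones((2, 1, 3))
  >>> tf.keras.backend.int_shape(tf.keras.backend.squeeze(x, axis=1))
  (2, 3)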
"""
return array_ops.squeeze(x, [axis])
@keras_export('keras.backend.temporal_padding')
@dispatch.add_dispatch_support
def temporal_padding(x, padding=(1, 1)):
"""Pads the middle dimension of a 3D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 integers, how many zeros to
add at the start and end of dim 1.
Returns:
A padded 3D tensor.
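  Example (a shape-only sketch):
  >>> x = tf.keras.backend.ones((2, 3, 4))
  >>> y = tf.keras.backend.temporal_padding(x, padding=(1, 1))
  >>> tf.keras.backend.int_shape(y)
  (2, 5, 4)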
"""
assert len(padding) == 2
pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_2d_padding')
@dispatch.add_dispatch_support
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
"""Pads the 2nd and 3rd dimensions of a 4D tensor.
Arguments:
x: Tensor or variable.
padding: Tuple of 2 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 4D tensor.
Raises:
      ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
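  Example (a shape-only sketch, assuming the default image data format is
  `channels_last`):
  >>> x = tf.keras.backend.ones((1, 2, 2, 3))
  >>> y = tf.keras.backend.spatial_2d_padding(x, padding=((1, 1), (2, 2)))
  >>> tf.keras.backend.int_shape(y)
  (1, 4, 6, 3)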
"""
assert len(padding) == 2
assert len(padding[0]) == 2
assert len(padding[1]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
else:
pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.spatial_3d_padding')
@dispatch.add_dispatch_support
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
"""Pads 5D tensor with zeros along the depth, height, width dimensions.
  Pads these dimensions with "padding[0]", "padding[1]" and "padding[2]" zeros
  on the left and right, respectively.
For 'channels_last' data_format,
the 2nd, 3rd and 4th dimension will be padded.
For 'channels_first' data_format,
the 3rd, 4th and 5th dimension will be padded.
Arguments:
x: Tensor or variable.
padding: Tuple of 3 tuples, padding pattern.
data_format: One of `channels_last` or `channels_first`.
Returns:
A padded 5D tensor.
Raises:
      ValueError: if `data_format` is neither
          `channels_last` nor `channels_first`.
"""
assert len(padding) == 3
assert len(padding[0]) == 2
assert len(padding[1]) == 2
assert len(padding[2]) == 2
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if data_format == 'channels_first':
pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
else:
pattern = [[0, 0], [padding[0][0], padding[0][1]],
[padding[1][0], padding[1][1]], [padding[2][0],
padding[2][1]], [0, 0]]
return array_ops.pad(x, pattern)
@keras_export('keras.backend.stack')
@dispatch.add_dispatch_support
def stack(x, axis=0):
"""Stacks a list of rank `R` tensors into a rank `R+1` tensor.
Arguments:
x: List of tensors.
axis: Axis along which to perform stacking.
Returns:
A tensor.
Example:
>>> a = tf.constant([[1, 2],[3, 4]])
>>> b = tf.constant([[10, 20],[30, 40]])
>>> tf.keras.backend.stack((a, b))
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[ 1, 2],
[ 3, 4]],
[[10, 20],
[30, 40]]], dtype=int32)>
"""
return array_ops.stack(x, axis=axis)
@keras_export('keras.backend.one_hot')
@dispatch.add_dispatch_support
def one_hot(indices, num_classes):
"""Computes the one-hot representation of an integer tensor.
Arguments:
indices: nD integer tensor of shape
`(batch_size, dim1, dim2, ... dim(n-1))`
num_classes: Integer, number of classes to consider.
  Returns:
      The one-hot tensor: an (n + 1)D one-hot representation of the input
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`.
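  Example (a minimal sketch, assuming eager execution):
  >>> labels = tf.constant([0, 2])
  >>> tf.keras.backend.eval(tf.keras.backend.one_hot(labels, num_classes=3))
  array([[1., 0., 0.],
         [0., 0., 1.]], dtype=float32)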
"""
return array_ops.one_hot(indices, depth=num_classes, axis=-1)
@keras_export('keras.backend.reverse')
@dispatch.add_dispatch_support
def reverse(x, axes):
"""Reverse a tensor along the specified axes.
Arguments:
x: Tensor to reverse.
axes: Integer or iterable of integers.
Axes to reverse.
Returns:
A tensor.
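  Example (a minimal sketch, assuming eager execution):
  >>> x = tf.constant([1, 2, 3])
  >>> tf.keras.backend.eval(tf.keras.backend.reverse(x, axes=0))
  array([3, 2, 1], dtype=int32)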
"""
if isinstance(axes, int):
axes = [axes]
return array_ops.reverse(x, axes)
# VALUE MANIPULATION
_VALUE_SET_CODE_STRING = """
>>> K = tf.keras.backend # Common keras convention
>>> v = K.variable(1.)
>>> # reassign
>>> K.set_value(v, 2.)
>>> print(K.get_value(v))
2.0
>>> # increment
>>> K.set_value(v, K.get_value(v) + 1)
>>> print(K.get_value(v))
3.0
Variable semantics in TensorFlow 2 are eager execution friendly. The above
code is roughly equivalent to:
>>> v = tf.Variable(1.)
>>> v.assign(2.)
>>> print(v.numpy())
2.0
>>> v.assign_add(1.)
>>> print(v.numpy())
3.0"""[3:] # Prune first newline and indent to match the docstring template.
@keras_export('keras.backend.get_value')
def get_value(x):
"""Returns the value of a variable.
  `backend.get_value` is the complement of `backend.set_value`, and provides
a generic interface for reading from variables while abstracting away the
differences between TensorFlow 1.x and 2.x semantics.
{snippet}
Arguments:
x: input variable.
Returns:
A Numpy array.
"""
if not tensor_util.is_tensor(x):
return x
if context.executing_eagerly() or isinstance(x, ops.EagerTensor):
return x.numpy()
if not getattr(x, '_in_graph_mode', True):
# This is a variable which was created in an eager context, but is being
# evaluated from a Graph.
with context.eager_mode():
return x.numpy()
if ops.executing_eagerly_outside_functions():
# This method of evaluating works inside the Keras FuncGraph.
return eval_in_eager_or_function(x)
with x.graph.as_default():
return x.eval(session=get_session((x,)))
@keras_export('keras.backend.batch_get_value')
@dispatch.add_dispatch_support
def batch_get_value(tensors):
"""Returns the value of more than one tensor variable.
Arguments:
tensors: list of ops to run.
Returns:
A list of Numpy arrays.
Raises:
RuntimeError: If this method is called inside defun.
"""
if context.executing_eagerly():
return [x.numpy() for x in tensors]
elif ops.inside_function(): # pylint: disable=protected-access
raise RuntimeError('Cannot get value inside Tensorflow graph function.')
if tensors:
return get_session(tensors).run(tensors)
else:
return []
@keras_export('keras.backend.set_value')
def set_value(x, value):
"""Sets the value of a variable, from a Numpy array.
  `backend.set_value` is the complement of `backend.get_value`, and provides
a generic interface for assigning to variables while abstracting away the
differences between TensorFlow 1.x and 2.x semantics.
{snippet}
Arguments:
x: Variable to set to a new value.
value: Value to set the tensor to, as a Numpy array
(of the same shape).
"""
value = np.asarray(value, dtype=dtype(x))
if ops.executing_eagerly_outside_functions():
x.assign(value)
else:
with get_graph().as_default():
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
# In order to support assigning weights to resizable variables in
# Keras, we make a placeholder with the correct number of dimensions
# but with None in each dimension. This way, we can assign weights
# of any size (as long as they have the correct dimensionality).
placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
assign_placeholder = array_ops.placeholder(
tf_dtype, shape=placeholder_shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
get_session().run(assign_op, feed_dict={assign_placeholder: value})
@keras_export('keras.backend.batch_set_value')
@dispatch.add_dispatch_support
def batch_set_value(tuples):
"""Sets the values of many tensor variables at once.
Arguments:
tuples: a list of tuples `(tensor, value)`.
`value` should be a Numpy array.
"""
if ops.executing_eagerly_outside_functions():
for x, value in tuples:
x.assign(np.asarray(value, dtype=dtype(x)))
else:
with get_graph().as_default():
if tuples:
assign_ops = []
feed_dict = {}
for x, value in tuples:
value = np.asarray(value, dtype=dtype(x))
tf_dtype = dtypes_module.as_dtype(x.dtype.name.split('_')[0])
if hasattr(x, '_assign_placeholder'):
assign_placeholder = x._assign_placeholder
assign_op = x._assign_op
else:
# In order to support assigning weights to resizable variables in
# Keras, we make a placeholder with the correct number of dimensions
# but with None in each dimension. This way, we can assign weights
# of any size (as long as they have the correct dimensionality).
placeholder_shape = tensor_shape.TensorShape([None] * value.ndim)
assign_placeholder = array_ops.placeholder(
tf_dtype, shape=placeholder_shape)
assign_op = x.assign(assign_placeholder)
x._assign_placeholder = assign_placeholder
x._assign_op = assign_op
assign_ops.append(assign_op)
feed_dict[assign_placeholder] = value
get_session().run(assign_ops, feed_dict=feed_dict)
get_value.__doc__ = get_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)
set_value.__doc__ = set_value.__doc__.format(snippet=_VALUE_SET_CODE_STRING)
@keras_export('keras.backend.print_tensor')
@dispatch.add_dispatch_support
def print_tensor(x, message=''):
"""Prints `message` and the tensor value when evaluated.
Note that `print_tensor` returns a new tensor identical to `x`
which should be used in the following code. Otherwise the
print operation is not taken into account during evaluation.
Example:
>>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> tf.keras.backend.print_tensor(x)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[1., 2.],
[3., 4.]], dtype=float32)>
Arguments:
x: Tensor to print.
message: Message to print jointly with the tensor.
Returns:
The same tensor `x`, unchanged.
"""
if isinstance(x, ops.Tensor) and hasattr(x, 'graph'):
with get_graph().as_default():
op = logging_ops.print_v2(message, x, output_stream=sys.stdout)
with ops.control_dependencies([op]):
return array_ops.identity(x)
else:
logging_ops.print_v2(message, x, output_stream=sys.stdout)
return x
# GRAPH MANIPULATION
class GraphExecutionFunction(object):
"""Runs a computation graph.
It's possible to pass arguments to `tf.Session.run()` via `session_kwargs`.
  In particular, additional operations can be passed via the `fetches` argument
  and additional tensor substitutions via the `feed_dict` argument. Note that given
substitutions are merged with substitutions from `inputs`. Even though
`feed_dict` is passed once in the constructor (called in `model.compile()`)
we can modify the values in the dictionary. Through this feed_dict we can
provide additional substitutions besides Keras inputs.
Arguments:
inputs: Feed placeholders to the computation graph.
outputs: Output tensors to fetch.
updates: Additional update ops to be run at function call.
name: A name to help users identify what this function does.
session_kwargs: Arguments to `tf.Session.run()`:
`fetches`, `feed_dict`, `options`, `run_metadata`.
"""
def __init__(self, inputs, outputs, updates=None, name=None,
**session_kwargs):
updates = updates or []
if not isinstance(updates, (list, tuple)):
raise TypeError('`updates` in a Keras backend function '
'should be a list or tuple.')
self._inputs_structure = inputs
self.inputs = nest.flatten(inputs, expand_composites=True)
self._outputs_structure = outputs
self.outputs = cast_variables_to_tensor(
nest.flatten(outputs, expand_composites=True))
# TODO(b/127668432): Consider using autograph to generate these
# dependencies in call.
# Index 0 = total loss or model output for `predict`.
with ops.control_dependencies([self.outputs[0]]):
updates_ops = []
for update in updates:
if isinstance(update, tuple):
p, new_p = update
updates_ops.append(state_ops.assign(p, new_p))
else:
# assumed already an op
updates_ops.append(update)
self.updates_op = control_flow_ops.group(*updates_ops)
self.name = name
# additional tensor substitutions
self.feed_dict = session_kwargs.pop('feed_dict', None)
# additional operations
self.fetches = session_kwargs.pop('fetches', [])
if not isinstance(self.fetches, list):
self.fetches = [self.fetches]
self.run_options = session_kwargs.pop('options', None)
self.run_metadata = session_kwargs.pop('run_metadata', None)
# The main use case of `fetches` being passed to a model is the ability
# to run custom updates
# This requires us to wrap fetches in `identity` ops.
self.fetches = [array_ops.identity(x) for x in self.fetches]
self.session_kwargs = session_kwargs
# This mapping keeps track of the function that should receive the
# output from a fetch in `fetches`: { fetch: function(fetch_output) }
# A Callback can use this to register a function with access to the
# output values for a fetch it added.
self.fetch_callbacks = {}
if session_kwargs:
raise ValueError('Some keys in session_kwargs are not supported at this '
'time: %s' % (session_kwargs.keys(),))
self._callable_fn = None
self._feed_arrays = None
self._feed_symbols = None
self._symbol_vals = None
self._fetches = None
self._session = None
def _make_callable(self, feed_arrays, feed_symbols, symbol_vals, session):
"""Generates a callable that runs the graph.
Arguments:
feed_arrays: List of input tensors to be fed Numpy arrays at runtime.
feed_symbols: List of input tensors to be fed symbolic tensors at runtime.
symbol_vals: List of symbolic tensors to be fed to `feed_symbols`.
session: Session to use to generate the callable.
Returns:
Function that runs the graph according to the above options.
"""
# Prepare callable options.
callable_opts = config_pb2.CallableOptions()
# Handle external-data feed.
for x in feed_arrays:
callable_opts.feed.append(x.name)
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
callable_opts.feed.append(key.name)
# Handle symbolic feed.
for x, y in zip(feed_symbols, symbol_vals):
connection = callable_opts.tensor_connection.add()
if x.dtype != y.dtype:
y = math_ops.cast(y, dtype=x.dtype)
from_tensor = ops._as_graph_element(y)
if from_tensor is None:
from_tensor = y
connection.from_tensor = from_tensor.name # Data tensor
connection.to_tensor = x.name # Placeholder
# Handle fetches.
for x in self.outputs + self.fetches:
callable_opts.fetch.append(x.name)
# Handle updates.
callable_opts.target.append(self.updates_op.name)
# Handle run_options.
if self.run_options:
callable_opts.run_options.CopyFrom(self.run_options)
# Create callable.
callable_fn = session._make_callable_from_options(callable_opts)
# Cache parameters corresponding to the generated callable, so that
# we can detect future mismatches and refresh the callable.
self._callable_fn = callable_fn
self._feed_arrays = feed_arrays
self._feed_symbols = feed_symbols
self._symbol_vals = symbol_vals
self._fetches = list(self.fetches)
self._session = session
def _call_fetch_callbacks(self, fetches_output):
for fetch, output in zip(self._fetches, fetches_output):
if fetch in self.fetch_callbacks:
self.fetch_callbacks[fetch](output)
def _eval_if_composite(self, tensor):
"""Helper method which evaluates any CompositeTensors passed to it."""
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
if isinstance(tensor, composite_tensor.CompositeTensor):
return self._session.run(tensor)
else:
return tensor
def __call__(self, inputs):
inputs = nest.flatten(inputs, expand_composites=True)
session = get_session(inputs)
feed_arrays = []
array_vals = []
feed_symbols = []
symbol_vals = []
for tensor, value in zip(self.inputs, inputs):
if value is None:
continue
if tensor_util.is_tensor(value):
# Case: feeding symbolic tensor.
feed_symbols.append(tensor)
symbol_vals.append(value)
else:
# Case: feeding Numpy array.
feed_arrays.append(tensor)
# We need to do array conversion and type casting at this level, since
# `callable_fn` only supports exact matches.
tensor_type = dtypes_module.as_dtype(tensor.dtype)
array_vals.append(np.asarray(value,
dtype=tensor_type.as_numpy_dtype))
if self.feed_dict:
for key in sorted(self.feed_dict.keys()):
array_vals.append(
np.asarray(self.feed_dict[key], dtype=key.dtype.base_dtype.name))
# Refresh callable if anything has changed.
if (self._callable_fn is None or feed_arrays != self._feed_arrays or
symbol_vals != self._symbol_vals or
feed_symbols != self._feed_symbols or self.fetches != self._fetches or
session != self._session):
self._make_callable(feed_arrays, feed_symbols, symbol_vals, session)
fetched = self._callable_fn(*array_vals,
run_metadata=self.run_metadata)
self._call_fetch_callbacks(fetched[-len(self._fetches):])
output_structure = nest.pack_sequence_as(
self._outputs_structure,
fetched[:len(self.outputs)],
expand_composites=True)
# We need to evaluate any composite tensor objects that have been
# reconstructed in 'pack_sequence_as', since otherwise they'll be output as
# actual CompositeTensor objects instead of the value(s) contained in the
# CompositeTensors. E.g., if output_structure contains a SparseTensor, then
# this ensures that we return its value as a SparseTensorValue rather than
# a SparseTensor.
return nest.map_structure(self._eval_if_composite, output_structure)
def eval_in_eager_or_function(outputs):
"""Method to evaluate a tensor in eager or in a tf.function.
In the case of a tf.function, it will lift the tensor out of the function
and try to evaluate that piece of the graph.
Warning: Do not add new usages of this function.
TODO(b/150169018): delete this function once _keras_history_helper is no
longer needed, after Keras switches to KerasTensors and op layers
work via dispatch.
Arguments:
outputs: tensors to fetch.
Returns:
The value of the tensors (as numpy arrays).
"""
outputs_structure = outputs
outputs = nest.flatten(outputs, expand_composites=True)
graphs = {
i.graph
for i in nest.flatten([outputs])
if hasattr(i, 'graph')
}
if len(graphs) > 1:
raise ValueError('Cannot create an execution function which is comprised '
'of elements from multiple graphs.')
source_graph = graphs.pop()
with _scratch_graph() as exec_graph:
global_graph = get_graph()
if source_graph not in (exec_graph, global_graph):
raise ValueError('Unknown graph. Aborting.')
if source_graph is global_graph and exec_graph is not global_graph:
init_tensors = outputs
lifted_map = lift_to_graph.lift_to_graph(
tensors=init_tensors,
graph=exec_graph,
sources=[],
add_sources=True,
handle_captures=True,
base_graph=source_graph)
outputs = [lifted_map[i] for i in outputs]
# Consolidate updates
with exec_graph.as_default():
outputs = cast_variables_to_tensor(outputs)
exec_graph.inputs = exec_graph.internal_captures
exec_graph.outputs = outputs
graph_fn = eager_function.ConcreteFunction(exec_graph)
graph_fn._num_positional_args = 0
graph_fn._arg_keywords = []
outputs = graph_fn()
# EagerTensor.numpy() will often make a copy to ensure memory safety.
# However in this case `outputs` is not directly returned, so it is always
# safe to reuse the underlying buffer without checking. In such a case the
# private numpy conversion method is preferred to guarantee performance.
return nest.pack_sequence_as(
outputs_structure,
[x._numpy() for x in outputs], # pylint: disable=protected-access
expand_composites=True)
@keras_export('keras.backend.function')
def function(inputs, outputs, updates=None, name=None, **kwargs):
"""Instantiates a Keras function.
Arguments:
inputs: List of placeholder tensors.
outputs: List of output tensors.
updates: List of update ops.
name: String, name of function.
**kwargs: Passed to `tf.Session.run`.
Returns:
Output values as Numpy arrays.
Raises:
ValueError: if invalid kwargs are passed in or if in eager execution.
"""
if ops.executing_eagerly_outside_functions():
if kwargs:
raise ValueError('Session keyword arguments are not supported during '
'eager execution. You passed: %s' % (kwargs,))
if updates:
raise ValueError('`updates` argument is not supported during '
'eager execution. You passed: %s' % (updates,))
from tensorflow.python.keras import models # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.utils import tf_utils # pylint: disable=g-import-not-at-top
model = models.Model(inputs=inputs, outputs=outputs)
wrap_outputs = isinstance(outputs, list) and len(outputs) == 1
def func(model_inputs):
outs = model(model_inputs)
if wrap_outputs:
outs = [outs]
return tf_utils.to_numpy_or_python_type(outs)
return func
if kwargs:
for key in kwargs:
if (key not in tf_inspect.getfullargspec(session_module.Session.run)[0]
and key not in ['inputs', 'outputs', 'updates', 'name']):
msg = ('Invalid argument "%s" passed to K.function with TensorFlow '
'backend') % key
raise ValueError(msg)
return GraphExecutionFunction(
inputs, outputs, updates=updates, name=name, **kwargs)
@keras_export('keras.backend.gradients')
def gradients(loss, variables):
"""Returns the gradients of `loss` w.r.t. `variables`.
Arguments:
loss: Scalar tensor to minimize.
variables: List of variables.
Returns:
A gradients tensor.
"""
return gradients_module.gradients(
loss, variables, colocate_gradients_with_ops=True)
@keras_export('keras.backend.stop_gradient')
@dispatch.add_dispatch_support
def stop_gradient(variables):
"""Returns `variables` but with zero gradient w.r.t. every other variable.
Arguments:
variables: Tensor or list of tensors to consider constant with respect
to any other variable.
Returns:
A single tensor or a list of tensors (depending on the passed argument)
that has no gradient with respect to any other variable.
"""
  if isinstance(variables, (list, tuple)):
    # `map` is lazy in Python 3; materialize it so that a list of tensors is
    # returned, as documented.
    return [array_ops.stop_gradient(v) for v in variables]
return array_ops.stop_gradient(variables)
# CONTROL FLOW
@keras_export('keras.backend.rnn')
@dispatch.add_dispatch_support
def rnn(step_function,
inputs,
initial_states,
go_backwards=False,
mask=None,
constants=None,
unroll=False,
input_length=None,
time_major=False,
zero_output_for_mask=False):
"""Iterates over the time dimension of a tensor.
Arguments:
step_function: RNN step function.
Args;
input; Tensor with shape `(samples, ...)` (no time dimension),
representing input for the batch of samples at a certain
time step.
states; List of tensors.
Returns;
output; Tensor with shape `(samples, output_dim)`
(no time dimension).
new_states; List of tensors, same length and shapes
as 'states'. The first state in the list must be the
output tensor at the previous timestep.
inputs: Tensor of temporal data of shape `(samples, time, ...)`
(at least 3D), or nested tensors, and each of which has shape
`(samples, time, ...)`.
initial_states: Tensor with shape `(samples, state_size)`
(no time dimension), containing the initial values for the states used
in the step function. In the case that state_size is in a nested
shape, the shape of initial_states will also follow the nested
structure.
go_backwards: Boolean. If True, do the iteration over the time
dimension in reverse order and return the reversed sequence.
mask: Binary tensor with shape `(samples, time, 1)`,
with a zero for every element that is masked.
constants: List of constant values passed at each step.
unroll: Whether to unroll the RNN or to use a symbolic `while_loop`.
input_length: An integer or a 1-D Tensor, depending on whether
the time dimension is fixed-length or not. In case of variable length
input, it is used for masking in case there's no mask specified.
time_major: Boolean. If true, the inputs and outputs will be in shape
`(timesteps, batch, ...)`, whereas in the False case, it will be
`(batch, timesteps, ...)`. Using `time_major = True` is a bit more
efficient because it avoids transposes at the beginning and end of the
RNN calculation. However, most TensorFlow data is batch-major, so by
default this function accepts input and emits output in batch-major
form.
zero_output_for_mask: Boolean. If True, the output for masked timestep
will be zeros, whereas in the False case, output from previous
timestep is returned.
Returns:
A tuple, `(last_output, outputs, new_states)`.
last_output: the latest output of the rnn, of shape `(samples, ...)`
outputs: tensor with shape `(samples, time, ...)` where each
entry `outputs[s, t]` is the output of the step function
at time `t` for sample `s`.
new_states: list of tensors, latest states returned by
the step function, of shape `(samples, ...)`.
Raises:
ValueError: if input dimension is less than 3.
ValueError: if `unroll` is `True` but input timestep is not a fixed
number.
ValueError: if `mask` is provided (not `None`) but states is not provided
(`len(states)` == 0).
"""
def swap_batch_timestep(input_t):
# Swap the batch and timestep dim for the incoming tensor.
axes = list(range(len(input_t.shape)))
axes[0], axes[1] = 1, 0
return array_ops.transpose(input_t, axes)
if not time_major:
inputs = nest.map_structure(swap_batch_timestep, inputs)
flatted_inputs = nest.flatten(inputs)
time_steps = flatted_inputs[0].shape[0]
batch = flatted_inputs[0].shape[1]
time_steps_t = array_ops.shape(flatted_inputs[0])[0]
for input_ in flatted_inputs:
input_.shape.with_rank_at_least(3)
if mask is not None:
if mask.dtype != dtypes_module.bool:
mask = math_ops.cast(mask, dtypes_module.bool)
if len(mask.shape) == 2:
mask = expand_dims(mask)
if not time_major:
mask = swap_batch_timestep(mask)
if constants is None:
constants = []
# tf.where needs its condition tensor to be the same shape as its two
# result tensors, but in our case the condition (mask) tensor is
# (nsamples, 1), and inputs are (nsamples, ndimensions) or even more.
# So we need to broadcast the mask to match the shape of inputs.
# That's what the tile call does, it just repeats the mask along its
# second dimension n times.
def _expand_mask(mask_t, input_t, fixed_dim=1):
if nest.is_nested(mask_t):
raise ValueError('mask_t is expected to be tensor, but got %s' % mask_t)
if nest.is_nested(input_t):
raise ValueError('input_t is expected to be tensor, but got %s' % input_t)
rank_diff = len(input_t.shape) - len(mask_t.shape)
for _ in range(rank_diff):
mask_t = array_ops.expand_dims(mask_t, -1)
multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:]
return array_ops.tile(mask_t, multiples)
if unroll:
if not time_steps:
raise ValueError('Unrolling requires a fixed number of timesteps.')
states = tuple(initial_states)
successive_states = []
successive_outputs = []
# Process the input tensors. The input tensor needs to be split on the
# time_step dim, and reversed if go_backwards is True. In the case of nested
# input, the input is flattened and then transformed individually.
# The result of this will be a tuple of lists, where each item in the tuple
# is a list of tensors with shape (batch, feature).
def _process_single_input_t(input_t):
input_t = array_ops.unstack(input_t) # unstack for time_step dim
if go_backwards:
input_t.reverse()
return input_t
if nest.is_nested(inputs):
processed_input = nest.map_structure(_process_single_input_t, inputs)
else:
processed_input = (_process_single_input_t(inputs),)
def _get_input_tensor(time):
inp = [t_[time] for t_ in processed_input]
return nest.pack_sequence_as(inputs, inp)
if mask is not None:
mask_list = array_ops.unstack(mask)
if go_backwards:
mask_list.reverse()
for i in range(time_steps):
inp = _get_input_tensor(i)
mask_t = mask_list[i]
output, new_states = step_function(inp,
tuple(states) + tuple(constants))
tiled_mask_t = _expand_mask(mask_t, output)
if not successive_outputs:
prev_output = zeros_like(output)
else:
prev_output = successive_outputs[-1]
output = array_ops.where_v2(tiled_mask_t, output, prev_output)
flat_states = nest.flatten(states)
flat_new_states = nest.flatten(new_states)
tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_states)
flat_final_states = tuple(
array_ops.where_v2(m, s, ps)
for m, s, ps in zip(tiled_mask_t, flat_new_states, flat_states))
states = nest.pack_sequence_as(states, flat_final_states)
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
if zero_output_for_mask:
last_output = array_ops.where_v2(
_expand_mask(mask_list[-1], last_output), last_output,
zeros_like(last_output))
outputs = array_ops.where_v2(
_expand_mask(mask, outputs, fixed_dim=2), outputs,
zeros_like(outputs))
else: # mask is None
for i in range(time_steps):
inp = _get_input_tensor(i)
output, states = step_function(inp, tuple(states) + tuple(constants))
successive_outputs.append(output)
successive_states.append(states)
last_output = successive_outputs[-1]
new_states = successive_states[-1]
outputs = array_ops.stack(successive_outputs)
else: # Unroll == False
states = tuple(initial_states)
# Create the input TensorArray. If the inputs are nested tensors, they will
# be flattened first, and one TensorArray will be created per flattened
# tensor.
input_ta = tuple(
tensor_array_ops.TensorArray(
dtype=inp.dtype,
size=time_steps_t,
tensor_array_name='input_ta_%s' % i)
for i, inp in enumerate(flatted_inputs))
input_ta = tuple(
ta.unstack(input_) if not go_backwards else ta
.unstack(reverse(input_, 0))
for ta, input_ in zip(input_ta, flatted_inputs))
# Get the time(0) input and compute the output for it; the output will be
# used to determine the dtype of the output tensor array. Don't read from
# input_ta because TensorArray's clear_after_read defaults to True.
input_time_zero = nest.pack_sequence_as(inputs,
[inp[0] for inp in flatted_inputs])
# output_time_zero is used to determine the cell output shape and its dtype.
# the value is discarded.
output_time_zero, _ = step_function(
input_time_zero, tuple(initial_states) + tuple(constants))
output_ta = tuple(
tensor_array_ops.TensorArray(
dtype=out.dtype,
size=time_steps_t,
element_shape=out.shape,
tensor_array_name='output_ta_%s' % i)
for i, out in enumerate(nest.flatten(output_time_zero)))
time = constant_op.constant(0, dtype='int32', name='time')
# Only specify 'maximum_iterations' when building for XLA, since specifying
# it causes slowdowns on GPU in TF.
if (not context.executing_eagerly() and
control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph())):
max_iterations = math_ops.reduce_max(input_length)
else:
max_iterations = None
while_loop_kwargs = {
'cond': lambda time, *_: time < time_steps_t,
'maximum_iterations': max_iterations,
'parallel_iterations': 32,
'swap_memory': True,
}
if mask is not None:
if go_backwards:
mask = reverse(mask, 0)
mask_ta = tensor_array_ops.TensorArray(
dtype=dtypes_module.bool,
size=time_steps_t,
tensor_array_name='mask_ta')
mask_ta = mask_ta.unstack(mask)
def masking_fn(time):
return mask_ta.read(time)
def compute_masked_output(mask_t, flat_out, flat_mask):
tiled_mask_t = tuple(
_expand_mask(mask_t, o, fixed_dim=len(mask_t.shape))
for o in flat_out)
return tuple(
array_ops.where_v2(m, o, fm)
for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask))
elif isinstance(input_length, ops.Tensor):
if go_backwards:
max_len = math_ops.reduce_max(input_length, axis=0)
rev_input_length = math_ops.subtract(max_len - 1, input_length)
def masking_fn(time):
return math_ops.less(rev_input_length, time)
else:
def masking_fn(time):
return math_ops.greater(input_length, time)
def compute_masked_output(mask_t, flat_out, flat_mask):
return tuple(
array_ops.where(mask_t, o, zo)
for (o, zo) in zip(flat_out, flat_mask))
else:
masking_fn = None
if masking_fn is not None:
# The mask for the output at step T is based on the output of T - 1. In the
# case T = 0, a zero-filled tensor is used.
flat_zero_output = tuple(array_ops.zeros_like(o)
for o in nest.flatten(output_time_zero))
def _step(time, output_ta_t, prev_output, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
prev_output: tuple of outputs from time - 1.
*states: List of states.
Returns:
Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
# maybe set shape.
current_input = nest.pack_sequence_as(inputs, current_input)
mask_t = masking_fn(time)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
# mask output
flat_output = nest.flatten(output)
flat_mask_output = (flat_zero_output if zero_output_for_mask
else nest.flatten(prev_output))
flat_new_output = compute_masked_output(mask_t, flat_output,
flat_mask_output)
# mask states
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
flat_final_state = compute_masked_output(mask_t, flat_new_state,
flat_state)
new_states = nest.pack_sequence_as(new_states, flat_final_state)
output_ta_t = tuple(
ta.write(time, out)
for ta, out in zip(output_ta_t, flat_new_output))
return (time + 1, output_ta_t,
tuple(flat_new_output)) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta, flat_zero_output) + states,
**while_loop_kwargs)
# Skip final_outputs[2] which is the output for final timestep.
new_states = final_outputs[3:]
else:
def _step(time, output_ta_t, *states):
"""RNN step function.
Arguments:
time: Current timestep value.
output_ta_t: TensorArray.
*states: List of states.
Returns:
Tuple: `(time + 1,output_ta_t) + tuple(new_states)`
"""
current_input = tuple(ta.read(time) for ta in input_ta)
current_input = nest.pack_sequence_as(inputs, current_input)
output, new_states = step_function(current_input,
tuple(states) + tuple(constants))
flat_state = nest.flatten(states)
flat_new_state = nest.flatten(new_states)
for state, new_state in zip(flat_state, flat_new_state):
if isinstance(new_state, ops.Tensor):
new_state.set_shape(state.shape)
flat_output = nest.flatten(output)
output_ta_t = tuple(
ta.write(time, out) for ta, out in zip(output_ta_t, flat_output))
new_states = nest.pack_sequence_as(initial_states, flat_new_state)
return (time + 1, output_ta_t) + tuple(new_states)
final_outputs = control_flow_ops.while_loop(
body=_step,
loop_vars=(time, output_ta) + states,
**while_loop_kwargs)
new_states = final_outputs[2:]
output_ta = final_outputs[1]
outputs = tuple(o.stack() for o in output_ta)
last_output = tuple(o[-1] for o in outputs)
outputs = nest.pack_sequence_as(output_time_zero, outputs)
last_output = nest.pack_sequence_as(output_time_zero, last_output)
# static shape inference
def set_shape(output_):
if isinstance(output_, ops.Tensor):
shape = output_.shape.as_list()
shape[0] = time_steps
shape[1] = batch
output_.set_shape(shape)
return output_
outputs = nest.map_structure(set_shape, outputs)
if not time_major:
outputs = nest.map_structure(swap_batch_timestep, outputs)
return last_output, outputs, new_states
@keras_export('keras.backend.switch')
@dispatch.add_dispatch_support
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value.
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
Arguments:
condition: tensor (`int` or `bool`).
then_expression: either a tensor, or a callable that returns a tensor.
else_expression: either a tensor, or a callable that returns a tensor.
Returns:
The selected tensor.
Raises:
ValueError: If rank of `condition` is greater than rank of expressions.
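Example (an illustrative sketch of typical usage, assuming eager execution):
>>> cond = tf.constant(True)
>>> out = tf.keras.backend.switch(cond,
...                               lambda: tf.ones((2,)),
...                               lambda: tf.zeros((2,)))
>>> out
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>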
"""
if condition.dtype != dtypes_module.bool:
condition = math_ops.cast(condition, 'bool')
cond_ndim = ndim(condition)
if not cond_ndim:
if not callable(then_expression):
def then_expression_fn():
return then_expression
else:
then_expression_fn = then_expression
if not callable(else_expression):
def else_expression_fn():
return else_expression
else:
else_expression_fn = else_expression
x = control_flow_ops.cond(condition, then_expression_fn, else_expression_fn)
else:
# tf.where needs its condition tensor
# to be the same shape as its two
# result tensors
if callable(then_expression):
then_expression = then_expression()
if callable(else_expression):
else_expression = else_expression()
expr_ndim = ndim(then_expression)
if cond_ndim > expr_ndim:
raise ValueError('Rank of `condition` should be less than or'
' equal to rank of `then_expression` and '
'`else_expression`. ndim(condition)=' + str(cond_ndim) +
', ndim(then_expression)'
'=' + str(expr_ndim))
if cond_ndim > 1:
ndim_diff = expr_ndim - cond_ndim
cond_shape = array_ops.concat(
[array_ops.shape(condition), [1] * ndim_diff], axis=0)
condition = array_ops.reshape(condition, cond_shape)
expr_shape = array_ops.shape(then_expression)
shape_diff = expr_shape - cond_shape
tile_shape = array_ops.where_v2(shape_diff > 0, expr_shape,
array_ops.ones_like(expr_shape))
condition = array_ops.tile(condition, tile_shape)
x = array_ops.where_v2(condition, then_expression, else_expression)
return x
@keras_export('keras.backend.in_train_phase')
def in_train_phase(x, alt, training=None):
"""Selects `x` in train phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in train phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on the `training` flag.
The `training` flag defaults to `K.learning_phase()`.
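Example (an illustrative sketch; `training=1` selects the first branch):
>>> out = tf.keras.backend.in_train_phase(
...     lambda: tf.zeros((1,)), lambda: tf.ones((1,)), training=1)
>>> out
<tf.Tensor: shape=(1,), dtype=float32, numpy=array([0.], dtype=float32)>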
"""
from tensorflow.python.keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top
if training is None:
training = base_layer_utils.call_context().training
if training is None:
training = learning_phase()
# TODO(b/138862903): Handle the case when training is tensor.
if not tensor_util.is_tensor(training):
if training == 1 or training is True:
if callable(x):
return x()
else:
return x
elif training == 0 or training is False:
if callable(alt):
return alt()
else:
return alt
# else: assume learning phase is a placeholder tensor.
x = switch(training, x, alt)
return x
@keras_export('keras.backend.in_test_phase')
def in_test_phase(x, alt, training=None):
"""Selects `x` in test phase, and `alt` otherwise.
Note that `alt` should have the *same shape* as `x`.
Arguments:
x: What to return in test phase
(tensor or callable that returns a tensor).
alt: What to return otherwise
(tensor or callable that returns a tensor).
training: Optional scalar tensor
(or Python boolean, or Python integer)
specifying the learning phase.
Returns:
Either `x` or `alt` based on `K.learning_phase`.
"""
return in_train_phase(alt, x, training=training)
# NN OPERATIONS
@keras_export('keras.backend.relu')
@dispatch.add_dispatch_support
def relu(x, alpha=0., max_value=None, threshold=0):
"""Rectified linear unit.
With default values, it returns element-wise `max(x, 0)`.
Otherwise, it follows:
`f(x) = max_value` for `x >= max_value`,
`f(x) = x` for `threshold <= x < max_value`,
`f(x) = alpha * (x - threshold)` otherwise.
Arguments:
x: A tensor or variable.
alpha: A scalar, slope of negative section (default=`0.`).
max_value: float. Saturation threshold.
threshold: float. Threshold value for thresholded activation.
Returns:
A tensor.
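Example (illustrative):
>>> x = tf.constant([-2., 0., 3.])
>>> tf.keras.backend.relu(x)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0., 0., 3.], dtype=float32)>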
"""
# While x can be a tensor or variable, we also see cases where
# numpy arrays, lists and tuples are passed as well;
# lists and tuples do not have a 'dtype' attribute.
dtype = getattr(x, 'dtype', floatx())
if alpha != 0.:
if max_value is None and threshold == 0:
return nn.leaky_relu(x, alpha=alpha)
if threshold != 0:
negative_part = nn.relu(-x + threshold)
else:
negative_part = nn.relu(-x)
clip_max = max_value is not None
if threshold != 0:
# computes x for x > threshold else 0
x = x * math_ops.cast(math_ops.greater(x, threshold), dtype=dtype)
elif max_value == 6:
# if no threshold, then can use nn.relu6 native TF op for performance
x = nn.relu6(x)
clip_max = False
else:
x = nn.relu(x)
if clip_max:
max_value = _constant_to_tensor(max_value, x.dtype.base_dtype)
zero = _constant_to_tensor(0, x.dtype.base_dtype)
x = clip_ops.clip_by_value(x, zero, max_value)
if alpha != 0.:
alpha = _to_tensor(alpha, x.dtype.base_dtype)
x -= alpha * negative_part
return x
@keras_export('keras.backend.elu')
@dispatch.add_dispatch_support
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x: A tensor or variable to compute the activation function for.
alpha: A scalar, slope of negative section.
Returns:
A tensor.
"""
res = nn.elu(x)
if alpha == 1:
return res
else:
return array_ops.where_v2(x > 0, res, alpha * res)
@keras_export('keras.backend.softmax')
@dispatch.add_dispatch_support
def softmax(x, axis=-1):
"""Softmax of a tensor.
Arguments:
x: A tensor or variable.
axis: The dimension softmax would be performed on.
The default is -1 which indicates the last dimension.
Returns:
A tensor.
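Example (an illustrative sketch; each row of the result sums to 1):
>>> probs = tf.keras.backend.softmax(tf.constant([[1., 2., 3.]]))  # ~[[0.09, 0.24, 0.67]]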
"""
return nn.softmax(x, axis=axis)
@keras_export('keras.backend.softplus')
@dispatch.add_dispatch_support
def softplus(x):
"""Softplus of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softplus(x)
@keras_export('keras.backend.softsign')
@dispatch.add_dispatch_support
def softsign(x):
"""Softsign of a tensor.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.softsign(x)
@keras_export('keras.backend.categorical_crossentropy')
@dispatch.add_dispatch_support
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor of the same shape as `output`.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
Example:
>>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
>>> print(a)
tf.Tensor(
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]], shape=(3, 3), dtype=float32)
>>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94], shape=[3,3])
>>> print(b)
tf.Tensor(
[[0.9 0.05 0.05]
[0.05 0.89 0.06]
[0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
>>> loss = tf.keras.backend.categorical_crossentropy(a, b)
>>> print(np.around(loss, 5))
[0.10536 0.11653 0.06188]
>>> loss = tf.keras.backend.categorical_crossentropy(a, a)
>>> print(np.around(loss, 5))
[0. 0. 0.]
"""
target = ops.convert_to_tensor_v2(target)
output = ops.convert_to_tensor_v2(output)
target.shape.assert_is_compatible_with(output.shape)
if from_logits:
return nn.softmax_cross_entropy_with_logits_v2(
labels=target, logits=output, axis=axis)
if (not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
# When the softmax activation function is used for the output operation, we
# use logits from the softmax function directly to compute the loss, in
# order to avoid numerical issues when probabilities collapse to zero
# during training. See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.softmax_cross_entropy_with_logits_v2(
labels=target, logits=output, axis=axis)
# scale preds so that the class probas of each sample sum to 1
output = output / math_ops.reduce_sum(output, axis, True)
# Compute cross entropy from probabilities.
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
return -math_ops.reduce_sum(target * math_ops.log(output), axis)
@keras_export('keras.backend.sparse_categorical_crossentropy')
@dispatch.add_dispatch_support
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
"""Categorical crossentropy with integer targets.
Arguments:
target: An integer tensor.
output: A tensor resulting from a softmax
(unless `from_logits` is True, in which
case `output` is expected to be the logits).
from_logits: Boolean, whether `output` is the
result of a softmax, or is a tensor of logits.
axis: Int specifying the channels axis. `axis=-1` corresponds to data
format `channels_last`, and `axis=1` corresponds to data format
`channels_first`.
Returns:
Output tensor.
Raises:
ValueError: if `axis` is neither -1 nor one of the axes of `output`.
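Example (an illustrative sketch; integer-target counterpart of the
`categorical_crossentropy` example above):
>>> t = tf.constant([0, 1, 2])
>>> p = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94], shape=[3, 3])
>>> loss = tf.keras.backend.sparse_categorical_crossentropy(t, p)
>>> print(np.around(loss, 5))
[0.10536 0.11653 0.06188]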
"""
target = ops.convert_to_tensor_v2(target)
output = ops.convert_to_tensor_v2(output)
if (not from_logits and
not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
# When the softmax activation function is used for the output operation, we
# use logits from the softmax function directly to compute the loss, in
# order to avoid numerical issues when probabilities collapse to zero
# during training. See b/117284466
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
from_logits = True
if not from_logits:
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1 - epsilon_)
output = math_ops.log(output)
if isinstance(output.shape, (tuple, list)):
output_rank = len(output.shape)
else:
output_rank = output.shape.ndims
if output_rank is not None:
axis %= output_rank
if axis != output_rank - 1:
permutation = list(
itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
output = array_ops.transpose(output, perm=permutation)
elif axis != -1:
raise ValueError(
'Cannot compute sparse categorical crossentropy with `axis={}` on an '
'output tensor with unknown rank'.format(axis))
target = cast(target, 'int64')
# Try to adjust the shape so that rank of labels = rank of logits - 1.
output_shape = array_ops.shape_v2(output)
target_rank = target.shape.ndims
update_shape = (
target_rank is not None and output_rank is not None and
target_rank != output_rank - 1)
if update_shape:
target = flatten(target)
output = array_ops.reshape(output, [-1, output_shape[-1]])
if py_any(_is_symbolic_tensor(v) for v in [target, output]):
with get_graph().as_default():
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
else:
res = nn.sparse_softmax_cross_entropy_with_logits_v2(
labels=target, logits=output)
if update_shape and output_rank >= 3:
# If our output includes timesteps or spatial dimensions we need to reshape
return array_ops.reshape(res, output_shape[:-1])
else:
return res
@keras_export('keras.backend.binary_crossentropy')
@dispatch.add_dispatch_support
def binary_crossentropy(target, output, from_logits=False):
"""Binary crossentropy between an output tensor and a target tensor.
Arguments:
target: A tensor with the same shape as `output`.
output: A tensor.
from_logits: Whether `output` is expected to be a logits tensor.
By default, we consider that `output`
encodes a probability distribution.
Returns:
A tensor.
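Example (an illustrative sketch; values noted in the comment are approximate):
>>> target = tf.constant([1., 0., 1.])
>>> output = tf.constant([0.9, 0.1, 0.8])
>>> bce = tf.keras.backend.binary_crossentropy(target, output)  # ~[0.105, 0.105, 0.223]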
"""
target = ops.convert_to_tensor_v2(target)
output = ops.convert_to_tensor_v2(output)
if from_logits:
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
if (not isinstance(output, (ops.EagerTensor, variables_module.Variable)) and
output.op.type == 'Sigmoid') and not hasattr(output, '_keras_history'):
# When the sigmoid activation function is used for the output operation, we
# use logits from the sigmoid function directly to compute the loss, in
# order to avoid numerical issues when probabilities collapse to zero
# during training.
assert len(output.op.inputs) == 1
output = output.op.inputs[0]
return nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
# Compute cross entropy from probabilities.
bce = target * math_ops.log(output + epsilon())
bce += (1 - target) * math_ops.log(1 - output + epsilon())
return -bce
@keras_export('keras.backend.sigmoid')
@dispatch.add_dispatch_support
def sigmoid(x):
"""Element-wise sigmoid.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.sigmoid(x)
@keras_export('keras.backend.hard_sigmoid')
@dispatch.add_dispatch_support
def hard_sigmoid(x):
"""Segment-wise linear approximation of sigmoid.
Faster than sigmoid.
Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`.
In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
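Example (illustrative):
>>> x = tf.constant([-3., 0., 3.])
>>> tf.keras.backend.hard_sigmoid(x)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([0. , 0.5, 1. ], dtype=float32)>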
"""
point_two = _constant_to_tensor(0.2, x.dtype.base_dtype)
point_five = _constant_to_tensor(0.5, x.dtype.base_dtype)
x = math_ops.multiply(x, point_two)
x = math_ops.add(x, point_five)
x = clip_ops.clip_by_value(x, 0., 1.)
return x
@keras_export('keras.backend.tanh')
@dispatch.add_dispatch_support
def tanh(x):
"""Element-wise tanh.
Arguments:
x: A tensor or variable.
Returns:
A tensor.
"""
return nn.tanh(x)
@keras_export('keras.backend.dropout')
@dispatch.add_dispatch_support
def dropout(x, level, noise_shape=None, seed=None):
"""Sets entries in `x` to zero at random, while scaling the entire tensor.
Arguments:
x: tensor
level: fraction of the entries in the tensor
that will be set to 0.
noise_shape: shape for randomly generated keep/drop flags,
must be broadcastable to the shape of `x`
seed: random seed to ensure determinism.
Returns:
A tensor.
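Example (an illustrative sketch; which entries survive is random):
>>> x = tf.ones((3, 4))
>>> dropped = tf.keras.backend.dropout(x, level=0.5)
>>> dropped.shape
TensorShape([3, 4])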
"""
if seed is None:
seed = np.random.randint(10e6)
return nn.dropout_v2(x, rate=level, noise_shape=noise_shape, seed=seed)
@keras_export('keras.backend.l2_normalize')
@dispatch.add_dispatch_support
def l2_normalize(x, axis=None):
"""Normalizes a tensor wrt the L2 norm alongside the specified axis.
Arguments:
x: Tensor or variable.
axis: axis along which to perform normalization.
Returns:
A tensor.
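Example (an illustrative sketch; the comment shows the expected values):
>>> y = tf.keras.backend.l2_normalize(tf.constant([3., 4.]), axis=0)  # ~[0.6, 0.8]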
"""
return nn.l2_normalize(x, axis=axis)
@keras_export('keras.backend.in_top_k')
@dispatch.add_dispatch_support
def in_top_k(predictions, targets, k):
"""Returns whether the `targets` are in the top `k` `predictions`.
Arguments:
predictions: A tensor of shape `(batch_size, classes)` and type `float32`.
targets: A 1D tensor of length `batch_size` and type `int32` or `int64`.
k: An `int`, number of top elements to consider.
Returns:
A 1D tensor of length `batch_size` and type `bool`.
`output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k`
values of `predictions[i]`.
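Example (illustrative):
>>> preds = tf.constant([[0.1, 0.9], [0.8, 0.2]])
>>> targets = tf.constant([1, 1])
>>> tf.keras.backend.in_top_k(preds, targets, k=1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>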
"""
return nn.in_top_k(predictions, targets, k)
# CONVOLUTIONS
def _preprocess_conv1d_input(x, data_format):
"""Transpose and cast the input before the conv1d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NWC' # to pass TF Conv2dNative operations
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 1)) # NCW -> NWC
else:
tf_data_format = 'NCW'
return x, tf_data_format
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
"""Transpose and cast the input before the conv2d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
force_transpose: Boolean. If True, the input will always be transposed
from NCHW to NHWC if `data_format` is `"channels_first"`.
If False, the transposition only occurs on CPU (GPU ops are
assumed to support NCHW).
Returns:
A tensor.
"""
tf_data_format = 'NHWC'
if data_format == 'channels_first':
if not _has_nchw_support() or force_transpose:
x = array_ops.transpose(x, (0, 2, 3, 1)) # NCHW -> NHWC
else:
tf_data_format = 'NCHW'
return x, tf_data_format
def _preprocess_conv3d_input(x, data_format):
"""Transpose and cast the input before the conv3d.
Arguments:
x: input tensor.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor.
"""
tf_data_format = 'NDHWC'
if data_format == 'channels_first':
if not _has_nchw_support():
x = array_ops.transpose(x, (0, 2, 3, 4, 1))
else:
tf_data_format = 'NCDHW'
return x, tf_data_format
def _preprocess_padding(padding):
"""Convert keras' padding to TensorFlow's padding.
Arguments:
padding: string, one of 'same', 'valid'.
Returns:
a string, one of 'SAME', 'VALID'.
Raises:
ValueError: if `padding` is invalid.
"""
if padding == 'same':
padding = 'SAME'
elif padding == 'valid':
padding = 'VALID'
else:
raise ValueError('Invalid padding: ' + str(padding))
return padding
@keras_export('keras.backend.conv1d')
@dispatch.add_dispatch_support
def conv1d(x,
kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: stride integer.
padding: string, `"same"`, `"causal"` or `"valid"`.
data_format: string, one of "channels_last", "channels_first".
dilation_rate: integer dilate rate.
Returns:
A tensor, result of 1D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = kernel.shape.as_list()
if padding == 'causal':
# causal (dilated) convolution:
left_pad = dilation_rate * (kernel_shape[0] - 1)
x = temporal_padding(x, (left_pad, 0))
padding = 'valid'
padding = _preprocess_padding(padding)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.conv2d')
@dispatch.add_dispatch_support
def conv2d(x,
kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 2 integers.
Returns:
A tensor, result of 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
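Example (an illustrative shape check; the kernel layout assumed here is
`(rows, cols, input_depth, output_depth)`):
>>> x = tf.random.normal((2, 4, 4, 3))
>>> kernel = tf.random.normal((2, 2, 3, 8))
>>> y = tf.keras.backend.conv2d(x, kernel, padding='valid')
>>> y.shape
TensorShape([2, 3, 3, 8])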
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv2d_transpose')
@dispatch.add_dispatch_support
def conv2d_transpose(x,
kernel,
output_shape,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D deconvolution (i.e.
transposed convolution).
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: Tuple of 2 integers.
Returns:
A tensor, result of transposed 2D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
# `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
if data_format == 'channels_first' and dilation_rate != (1, 1):
force_transpose = True
else:
force_transpose = False
x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[1])
if output_shape[0] is None:
output_shape = (shape(x)[0],) + tuple(output_shape[1:])
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
if dilation_rate == (1, 1):
x = nn.conv2d_transpose(x, kernel, output_shape, strides,
padding=padding,
data_format=tf_data_format)
else:
assert dilation_rate[0] == dilation_rate[1]
x = nn.atrous_conv2d_transpose(
x,
kernel,
output_shape,
rate=dilation_rate[0],
padding=padding)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
def separable_conv1d(x,
depthwise_kernel,
pointwise_kernel,
strides=1,
padding='valid',
data_format=None,
dilation_rate=1):
"""1D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: stride integer.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: integer dilation rate.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(strides, int):
strides = (strides,)
if isinstance(dilation_rate, int):
dilation_rate = (dilation_rate,)
x, tf_data_format = _preprocess_conv1d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NWC':
spatial_start_dim = 1
strides = (1,) + strides * 2 + (1,)
else:
spatial_start_dim = 2
strides = (1, 1) + strides * 2
x = array_ops.expand_dims(x, spatial_start_dim)
depthwise_kernel = array_ops.expand_dims(depthwise_kernel, 0)
pointwise_kernel = array_ops.expand_dims(pointwise_kernel, 0)
dilation_rate = (1,) + dilation_rate
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
x = array_ops.squeeze(x, [spatial_start_dim])
if data_format == 'channels_first' and tf_data_format == 'NWC':
x = array_ops.transpose(x, (0, 2, 1)) # NWC -> NCW
return x
@keras_export('keras.backend.separable_conv2d')
@dispatch.add_dispatch_support
def separable_conv2d(x,
depthwise_kernel,
pointwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
pointwise_kernel: kernel for the 1x1 convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
ValueError: if `strides` is not a tuple of 2 integers.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if not isinstance(strides, tuple):
strides = tuple(strides)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.separable_conv2d(
x,
depthwise_kernel,
pointwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.depthwise_conv2d')
@dispatch.add_dispatch_support
def depthwise_conv2d(x,
depthwise_kernel,
strides=(1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1)):
"""2D convolution with separable filters.
Arguments:
x: input tensor
depthwise_kernel: convolution kernel for the depthwise convolution.
strides: strides tuple (length 2).
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of integers,
dilation rates for the separable convolution.
Returns:
Output tensor.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.depthwise_conv2d(
x,
depthwise_kernel,
strides=strides,
padding=padding,
rate=dilation_rate,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.conv3d')
@dispatch.add_dispatch_support
def conv3d(x,
kernel,
strides=(1, 1, 1),
padding='valid',
data_format=None,
dilation_rate=(1, 1, 1)):
"""3D convolution.
Arguments:
x: Tensor or variable.
kernel: kernel tensor.
strides: strides tuple.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
dilation_rate: tuple of 3 integers.
Returns:
A tensor, result of 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
x = nn.convolution(
input=x,
filter=kernel,
dilation_rate=dilation_rate,
strides=strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def conv3d_transpose(x,
kernel,
output_shape,
strides=(1, 1, 1),
padding='valid',
data_format=None):
"""3D deconvolution (i.e.
transposed convolution).
Arguments:
x: input tensor.
kernel: kernel tensor.
output_shape: 1D int tensor for the output shape.
strides: strides tuple.
padding: string, "same" or "valid".
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
A tensor, result of transposed 3D convolution.
Raises:
ValueError: if `data_format` is neither `channels_last` nor
`channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if isinstance(output_shape, (tuple, list)):
output_shape = array_ops.stack(output_shape)
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
output_shape = (output_shape[0], output_shape[2], output_shape[3],
output_shape[4], output_shape[1])
if output_shape[0] is None:
output_shape = (array_ops.shape(x)[0],) + tuple(output_shape[1:])
output_shape = array_ops.stack(list(output_shape))
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
else:
strides = (1, 1) + strides
x = nn.conv3d_transpose(
x,
kernel,
output_shape,
strides,
padding=padding,
data_format=tf_data_format)
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
@keras_export('keras.backend.pool2d')
@dispatch.add_dispatch_support
def pool2d(x,
pool_size,
strides=(1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""2D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 2 integers.
strides: tuple of 2 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 2D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
ValueError: if `pool_size` is not a tuple of 2 integers.
ValueError: if `strides` is not a tuple of 2 integers.
ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
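Example (an illustrative shape check):
>>> x = tf.random.normal((1, 4, 4, 1))
>>> y = tf.keras.backend.pool2d(x, pool_size=(2, 2), strides=(2, 2))
>>> y.shape
TensorShape([1, 2, 2, 1])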
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
if len(pool_size) != 2:
raise ValueError('`pool_size` must be a tuple of 2 integers.')
if len(strides) != 2:
raise ValueError('`strides` must be a tuple of 2 integers.')
x, tf_data_format = _preprocess_conv2d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NHWC':
x = array_ops.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW
return x
@keras_export('keras.backend.pool3d')
@dispatch.add_dispatch_support
def pool3d(x,
pool_size,
strides=(1, 1, 1),
padding='valid',
data_format=None,
pool_mode='max'):
"""3D Pooling.
Arguments:
x: Tensor or variable.
pool_size: tuple of 3 integers.
strides: tuple of 3 integers.
padding: string, `"same"` or `"valid"`.
data_format: string, `"channels_last"` or `"channels_first"`.
pool_mode: string, `"max"` or `"avg"`.
Returns:
A tensor, result of 3D pooling.
Raises:
ValueError: if `data_format` is neither `"channels_last"` nor
`"channels_first"`.
ValueError: if `pool_mode` is neither `"max"` nor `"avg"`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
x, tf_data_format = _preprocess_conv3d_input(x, data_format)
padding = _preprocess_padding(padding)
if tf_data_format == 'NDHWC':
strides = (1,) + strides + (1,)
pool_size = (1,) + pool_size + (1,)
else:
strides = (1, 1) + strides
pool_size = (1, 1) + pool_size
if pool_mode == 'max':
x = nn.max_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
elif pool_mode == 'avg':
x = nn.avg_pool3d(
x, pool_size, strides, padding=padding, data_format=tf_data_format)
else:
raise ValueError('Invalid pooling mode: ' + str(pool_mode))
if data_format == 'channels_first' and tf_data_format == 'NDHWC':
x = array_ops.transpose(x, (0, 4, 1, 2, 3))
return x
def local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply N-D convolution with un-shared weights.
Arguments:
inputs: (N+2)-D tensor with shape
(batch_size, channels_in, d_in1, ..., d_inN)
if data_format='channels_first', or
(batch_size, d_in1, ..., d_inN, channels_in)
if data_format='channels_last'.
kernel: the unshared weight for N-D convolution,
with shape (output_items, feature_dim, channels_out), where
feature_dim = np.prod(kernel_size) * channels_in,
output_items = np.prod(output_shape).
kernel_size: a tuple of N integers, specifying the
spatial dimensions of the N-D convolution window.
strides: a tuple of N integers, specifying the strides
of the convolution along the spatial dimensions.
output_shape: a tuple of (d_out1, ..., d_outN) specifying the spatial
dimensionality of the output.
data_format: string, "channels_first" or "channels_last".
Returns:
An (N+2)-D tensor with shape:
(batch_size, channels_out) + output_shape
if data_format='channels_first', or:
(batch_size,) + output_shape + (channels_out,)
if data_format='channels_last'.
Raises:
ValueError: if `data_format` is neither
`channels_last` nor `channels_first`.
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
kernel_shape = int_shape(kernel)
feature_dim = kernel_shape[1]
channels_out = kernel_shape[-1]
ndims = len(output_shape)
spatial_dimensions = list(range(ndims))
xs = []
output_axes_ticks = [range(axis_max) for axis_max in output_shape]
for position in itertools.product(*output_axes_ticks):
slices = [slice(None)]
if data_format == 'channels_first':
slices.append(slice(None))
slices.extend(
slice(position[d] * strides[d], position[d] * strides[d] +
kernel_size[d]) for d in spatial_dimensions)
if data_format == 'channels_last':
slices.append(slice(None))
xs.append(reshape(inputs[slices], (1, -1, feature_dim)))
x_aggregate = concatenate(xs, axis=0)
output = batch_dot(x_aggregate, kernel)
output = reshape(output, output_shape + (-1, channels_out))
if data_format == 'channels_first':
permutation = [ndims, ndims + 1] + spatial_dimensions
else:
permutation = [ndims] + spatial_dimensions + [ndims + 1]
return permute_dimensions(output, permutation)
@keras_export('keras.backend.local_conv1d')
@dispatch.add_dispatch_support
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None):
"""Apply 1D conv with un-shared weights.
Arguments:
inputs: 3D tensor with shape:
(batch_size, steps, input_dim)
if data_format is "channels_last" or
(batch_size, input_dim, steps)
if data_format is "channels_first".
kernel: the unshared weight for convolution,
with shape (output_length, feature_dim, filters).
kernel_size: a tuple of a single integer,
specifying the length of the 1D convolution window.
strides: a tuple of a single integer,
specifying the stride length of the convolution.
data_format: the data format, channels_first or channels_last.
Returns:
A 3D tensor with shape:
(batch_size, output_length, filters)
if data_format='channels_last'
or a 3D tensor with shape:
(batch_size, filters, output_length)
if data_format='channels_first'.
"""
output_shape = (kernel.shape[0],)
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.local_conv2d')
@dispatch.add_dispatch_support
def local_conv2d(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format=None):
"""Apply 2D conv with un-shared weights.
Arguments:
inputs: 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
kernel: the unshared weight for convolution,
with shape (output_items, feature_dim, filters).
kernel_size: a tuple of 2 integers, specifying the
width and height of the 2D convolution window.
strides: a tuple of 2 integers, specifying the strides
of the convolution along the width and height.
output_shape: a tuple with (output_row, output_col).
data_format: the data format, channels_first or channels_last.
Returns:
A 4D tensor with shape:
(batch_size, filters, new_rows, new_cols)
if data_format='channels_first'
or 4D tensor with shape:
(batch_size, new_rows, new_cols, filters)
if data_format='channels_last'.
"""
return local_conv(inputs,
kernel,
kernel_size,
strides,
output_shape,
data_format)
@keras_export('keras.backend.bias_add')
@dispatch.add_dispatch_support
def bias_add(x, bias, data_format=None):
"""Adds a bias vector to a tensor.
Arguments:
x: Tensor or variable.
bias: Bias tensor to add.
data_format: string, `"channels_last"` or `"channels_first"`.
Returns:
Output tensor.
Raises:
ValueError: In one of the two cases below:
1. invalid `data_format` argument.
2. invalid bias shape.
the bias should be either a vector or
a tensor with ndim(x) - 1 dimensions.
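Example (illustrative):
>>> y = tf.keras.backend.bias_add(tf.zeros((2, 3)), tf.constant([1., 2., 3.]))
>>> print(y.numpy())
[[1. 2. 3.]
 [1. 2. 3.]]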
"""
if data_format is None:
data_format = image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ' + str(data_format))
bias_shape = int_shape(bias)
if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
raise ValueError(
'Unexpected bias dimensions %d, expected to be 1 or %d dimensions' %
(len(bias_shape), ndim(x) - 1))
if len(bias_shape) == 1:
if data_format == 'channels_first':
return nn.bias_add(x, bias, data_format='NCHW')
return nn.bias_add(x, bias, data_format='NHWC')
if ndim(x) in (3, 4, 5):
if data_format == 'channels_first':
bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]
return x + reshape(bias, bias_reshape_axis)
return x + reshape(bias, (1,) + bias_shape)
return nn.bias_add(x, bias)
# RANDOMNESS
@keras_export('keras.backend.random_normal')
@dispatch.add_dispatch_support
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with normal distribution of values.
It is an alias to `tf.random.normal`.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: A float, the mean value of the normal distribution to draw samples.
Default to 0.0.
stddev: A float, the standard deviation of the normal distribution
to draw samples. Default to 1.0.
dtype: `tf.dtypes.DType`, dtype of returned tensor. Default to use Keras
backend dtype which is float32.
seed: Integer, random seed. Will use a random numpy integer when not
specified.
Returns:
A tensor with normal distribution of values.
Example:
>>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3),
... mean=0.0, stddev=1.0)
>>> random_normal_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_normal(
shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
@keras_export('keras.backend.random_uniform')
@dispatch.add_dispatch_support
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
"""Returns a tensor with uniform distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
minval: A float, lower boundary of the uniform distribution
to draw samples.
maxval: A float, upper boundary of the uniform distribution
to draw samples.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
Example:
>>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3),
... minval=0.0, maxval=1.0)
>>> random_uniform_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.random_uniform(
shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
@deprecated(None, 'Use `tf.keras.backend.random_bernoulli` instead.')
@keras_export('keras.backend.random_binomial')
@dispatch.add_dispatch_support
def random_binomial(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random binomial distribution of values.
DEPRECATED, use `tf.keras.backend.random_bernoulli` instead.
The binomial distribution with parameters `n` and `p` is the probability
distribution of the number of successes in `n` independent Bernoulli trials.
Only `n` = 1 is supported for now.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of binomial distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
Example:
>>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3),
... p=0.5)
>>> random_binomial_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=...,
dtype=float32)>
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return array_ops.where_v2(
random_ops.random_uniform(shape, dtype=dtype, seed=seed) <= p,
array_ops.ones(shape, dtype=dtype), array_ops.zeros(shape, dtype=dtype))
@keras_export('keras.backend.random_bernoulli')
@dispatch.add_dispatch_support
def random_bernoulli(shape, p=0.0, dtype=None, seed=None):
"""Returns a tensor with random bernoulli distribution of values.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
p: A float, `0. <= p <= 1`, probability of bernoulli distribution.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
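Example (an illustrative sketch; entries are random zeros and ones):
>>> mask = tf.keras.backend.random_bernoulli(shape=(2, 3), p=0.5)
>>> mask.shape
TensorShape([2, 3])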
"""
return random_binomial(shape, p, dtype, seed)
@keras_export('keras.backend.truncated_normal')
@dispatch.add_dispatch_support
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
"""Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution
with specified mean and standard deviation,
except that values whose magnitude is more than
two standard deviations from the mean are dropped and re-picked.
Arguments:
shape: A tuple of integers, the shape of tensor to create.
mean: Mean of the values.
stddev: Standard deviation of the values.
dtype: String, dtype of returned tensor.
seed: Integer, random seed.
Returns:
A tensor.
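Example (an illustrative sketch; values are random but lie within roughly
two standard deviations of the mean):
>>> t = tf.keras.backend.truncated_normal(shape=(2, 3), mean=0.0, stddev=1.0)
>>> t.shape
TensorShape([2, 3])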
"""
if dtype is None:
dtype = floatx()
if seed is None:
seed = np.random.randint(10e6)
return random_ops.truncated_normal(
shape, mean, stddev, dtype=dtype, seed=seed)
# CTC
# TensorFlow has a native implementation, but it uses sparse tensors
# and therefore requires a wrapper for Keras. The functions below convert
# dense to sparse tensors and also wrap up the beam search code that is
# in TensorFlow's CTC implementation.
@keras_export('keras.backend.ctc_label_dense_to_sparse')
@dispatch.add_dispatch_support
def ctc_label_dense_to_sparse(labels, label_lengths):
"""Converts CTC labels from dense to sparse.
Arguments:
labels: dense CTC labels.
label_lengths: length of the labels.
Returns:
A sparse tensor representation of the labels.
"""
label_shape = array_ops.shape(labels)
num_batches_tns = array_ops.stack([label_shape[0]])
max_num_labels_tns = array_ops.stack([label_shape[1]])
def range_less_than(old_input, current_input):
return array_ops.expand_dims(
math_ops.range(array_ops.shape(old_input)[1]), 0) < array_ops.fill(
max_num_labels_tns, current_input)
init = math_ops.cast(
array_ops.fill([1, label_shape[1]], 0), dtypes_module.bool)
dense_mask = functional_ops.scan(
range_less_than, label_lengths, initializer=init, parallel_iterations=1)
dense_mask = dense_mask[:, 0, :]
label_array = array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[1]), num_batches_tns),
label_shape)
label_ind = array_ops.boolean_mask(label_array, dense_mask)
batch_array = array_ops.transpose(
array_ops.reshape(
array_ops.tile(math_ops.range(0, label_shape[0]), max_num_labels_tns),
reverse(label_shape, 0)))
batch_ind = array_ops.boolean_mask(batch_array, dense_mask)
indices = array_ops.transpose(
array_ops.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))
vals_sparse = array_ops.gather_nd(labels, indices)
return sparse_tensor.SparseTensor(
math_ops.cast(indices, dtypes_module.int64), vals_sparse,
math_ops.cast(label_shape, dtypes_module.int64))
@keras_export('keras.backend.ctc_batch_cost')
@dispatch.add_dispatch_support
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
"""Runs CTC loss algorithm on each batch element.
Arguments:
y_true: tensor `(samples, max_string_length)`
containing the truth labels.
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_pred`.
label_length: tensor `(samples, 1)` containing the sequence length for
each batch item in `y_true`.
Returns:
Tensor with shape (samples,1) containing the
CTC loss of each element.
"""
label_length = math_ops.cast(
array_ops.squeeze(label_length, axis=-1), dtypes_module.int32)
input_length = math_ops.cast(
array_ops.squeeze(input_length, axis=-1), dtypes_module.int32)
sparse_labels = math_ops.cast(
ctc_label_dense_to_sparse(y_true, label_length), dtypes_module.int32)
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
return array_ops.expand_dims(
ctc.ctc_loss(
inputs=y_pred, labels=sparse_labels, sequence_length=input_length), 1)
@keras_export('keras.backend.ctc_decode')
@dispatch.add_dispatch_support
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
"""Decodes the output of a softmax.
Can use either greedy search (also known as best path)
or a constrained dictionary search.
Arguments:
y_pred: tensor `(samples, time_steps, num_categories)`
containing the prediction, or output of the softmax.
input_length: tensor `(samples, )` containing the sequence length for
each batch item in `y_pred`.
greedy: perform much faster best-path search if `true`.
This does not use a dictionary.
beam_width: if `greedy` is `false`: a beam search decoder will be used
with a beam of this width.
top_paths: if `greedy` is `false`,
how many of the most probable paths will be returned.
Returns:
Tuple:
List: if `greedy` is `true`, returns a list of one element that
contains the decoded sequence.
If `false`, returns the `top_paths` most probable
decoded sequences.
Each decoded sequence has shape (samples, time_steps).
Important: blank labels are returned as `-1`.
Tensor `(top_paths, )` that contains
the log probability of each decoded sequence.
"""
input_shape = shape(y_pred)
num_samples, num_steps = input_shape[0], input_shape[1]
y_pred = math_ops.log(array_ops.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
input_length = math_ops.cast(input_length, dtypes_module.int32)
if greedy:
(decoded, log_prob) = ctc.ctc_greedy_decoder(
inputs=y_pred, sequence_length=input_length)
else:
(decoded, log_prob) = ctc.ctc_beam_search_decoder(
inputs=y_pred,
sequence_length=input_length,
beam_width=beam_width,
top_paths=top_paths)
decoded_dense = []
for st in decoded:
st = sparse_tensor.SparseTensor(
st.indices, st.values, (num_samples, num_steps))
decoded_dense.append(
sparse_ops.sparse_tensor_to_dense(sp_input=st, default_value=-1))
return (decoded_dense, log_prob)
# HIGH ORDER FUNCTIONS
@keras_export('keras.backend.map_fn')
def map_fn(fn, elems, name=None, dtype=None):
"""Map the function fn over the elements elems and return the outputs.
Arguments:
fn: Callable that will be called upon each element in elems
elems: tensor
name: A string name for the map node in the graph
dtype: Output data type.
Returns:
Tensor with dtype `dtype`.
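Example (illustrative):
>>> elems = tf.constant([1., 2., 3.])
>>> tf.keras.backend.map_fn(lambda x: x * x, elems)
<tf.Tensor: shape=(3,), dtype=float32, numpy=array([1., 4., 9.], dtype=float32)>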
"""
return map_fn_lib.map_fn(fn, elems, name=name, dtype=dtype)
@keras_export('keras.backend.foldl')
def foldl(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from left to right.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[0]` in case of None)
name: A string name for the foldl node in the graph
Returns:
Tensor with same type and shape as `initializer`.
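Example (illustrative):
>>> elems = tf.constant([1., 2., 3., 4.])
>>> tf.keras.backend.foldl(lambda acc, x: acc + x, elems)
<tf.Tensor: shape=(), dtype=float32, numpy=10.0>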
"""
return functional_ops.foldl(fn, elems, initializer=initializer, name=name)
@keras_export('keras.backend.foldr')
def foldr(fn, elems, initializer=None, name=None):
"""Reduce elems using fn to combine them from right to left.
Arguments:
fn: Callable that will be called upon each element in elems and an
accumulator, for instance `lambda acc, x: acc + x`
elems: tensor
initializer: The first value used (`elems[-1]` in case of None)
name: A string name for the foldr node in the graph
Returns:
Same type and shape as initializer
"""
return functional_ops.foldr(fn, elems, initializer=initializer, name=name)
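# Illustrative usage sketch for the three higher-order helpers above (assumes an
# eager or session context in which these backend tensors can be evaluated).
def _example_map_and_fold_usage():
  elems = constant([1.0, 2.0, 3.0, 4.0])
  squares = map_fn(lambda x: x * x, elems)             # elementwise: [1., 4., 9., 16.]
  total_left = foldl(lambda acc, x: acc + x, elems)    # ((1 + 2) + 3) + 4 = 10.
  total_right = foldr(lambda acc, x: acc + x, elems)   # 1 + (2 + (3 + 4)) = 10.
  return squares, total_left, total_right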
# Load Keras default configuration from config file if present.
# Set Keras base dir path given KERAS_HOME env variable, if applicable.
# Otherwise either ~/.keras or /tmp.
if 'KERAS_HOME' in os.environ:
_keras_dir = os.environ.get('KERAS_HOME')
else:
_keras_base_dir = os.path.expanduser('~')
_keras_dir = os.path.join(_keras_base_dir, '.keras')
_config_path = os.path.expanduser(os.path.join(_keras_dir, 'keras.json'))
if os.path.exists(_config_path):
try:
with open(_config_path) as fh:
_config = json.load(fh)
except ValueError:
_config = {}
_floatx = _config.get('floatx', floatx())
assert _floatx in {'float16', 'float32', 'float64'}
_epsilon = _config.get('epsilon', epsilon())
assert isinstance(_epsilon, float)
_image_data_format = _config.get('image_data_format', image_data_format())
assert _image_data_format in {'channels_last', 'channels_first'}
set_floatx(_floatx)
set_epsilon(_epsilon)
set_image_data_format(_image_data_format)
# Save config file.
if not os.path.exists(_keras_dir):
try:
os.makedirs(_keras_dir)
except OSError:
# Except permission denied and potential race conditions
# in multi-threaded environments.
pass
if not os.path.exists(_config_path):
_config = {
'floatx': floatx(),
'epsilon': epsilon(),
'backend': 'tensorflow',
'image_data_format': image_data_format()
}
try:
with open(_config_path, 'w') as f:
f.write(json.dumps(_config, indent=4))
except IOError:
# Except permission denied.
pass
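# For reference, the keras.json written above typically looks like the following
# (values shown are the library defaults; an existing file with other values wins):
#
#     {
#         "floatx": "float32",
#         "epsilon": 1e-07,
#         "backend": "tensorflow",
#         "image_data_format": "channels_last"
#     }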
def configure_and_create_distributed_session(distribution_strategy):
"""Configure session config and create a session with it."""
def _create_session(distribution_strategy):
"""Create the Distributed Strategy session."""
session_config = get_default_session_config()
# If a session already exists, merge in its config; in the case there is a
# conflict, take values of the existing config.
global _SESSION
if getattr(_SESSION, 'session', None) and _SESSION.session._config:
session_config.MergeFrom(_SESSION.session._config)
if is_tpu_strategy(distribution_strategy):
# TODO(priyag, yuefengz): Remove this workaround when Distribute
# Coordinator is integrated with keras and we can create a session from
# there.
distribution_strategy.configure(session_config)
master = distribution_strategy.extended._tpu_cluster_resolver.master() # pylint: disable=protected-access
session = session_module.Session(config=session_config, target=master)
else:
worker_context = dc_context.get_current_worker_context()
if worker_context:
dc_session_config = worker_context.session_config
# Merge the default session config to the one from distribute
# coordinator, which is fine for now since they don't have
# conflicting configurations.
dc_session_config.MergeFrom(session_config)
session = session_module.Session(
config=dc_session_config, target=worker_context.master_target)
else:
distribution_strategy.configure(session_config)
session = session_module.Session(config=session_config)
set_session(session)
if distribution_strategy.extended._in_multi_worker_mode():
dc.run_distribute_coordinator(
_create_session,
distribution_strategy,
mode=dc.CoordinatorMode.INDEPENDENT_WORKER)
else:
_create_session(distribution_strategy)
def is_tpu_strategy(strategy):
"""We're executing TPU Strategy."""
return (strategy is not None and
strategy.__class__.__name__.startswith('TPUStrategy'))
def cast_variables_to_tensor(tensors):
def _cast_variables_to_tensor(tensor):
if isinstance(tensor, variables_module.Variable):
return array_ops.identity(tensor)
return tensor
return nest.map_structure(_cast_variables_to_tensor, tensors)
def _is_symbolic_tensor(x):
return tensor_util.is_tensor(x) and not isinstance(x, ops.EagerTensor)
def convert_inputs_if_ragged(inputs):
"""Converts any ragged tensors to dense."""
def _convert_ragged_input(inputs):
if isinstance(inputs, ragged_tensor.RaggedTensor):
return inputs.to_tensor()
return inputs
flat_inputs = nest.flatten(inputs)
contains_ragged = py_any(
isinstance(i, ragged_tensor.RaggedTensor) for i in flat_inputs)
if not contains_ragged:
return inputs, None
inputs = nest.map_structure(_convert_ragged_input, inputs)
  # Multiple masks are not yet supported, so one mask is used on all inputs.
# We approach this similarly when using row lengths to ignore steps.
nested_row_lengths = math_ops.cast(flat_inputs[0].nested_row_lengths()[0],
'int32')
return inputs, nested_row_lengths
def maybe_convert_to_ragged(is_ragged_input, output, nested_row_lengths):
"""Converts any ragged input back to its initial structure."""
if not is_ragged_input:
return output
return ragged_tensor.RaggedTensor.from_tensor(output, nested_row_lengths)
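# Illustrative pairing of the two ragged helpers above (a sketch, not library API):
# densify ragged inputs before a step that needs dense tensors, then restore the
# ragged structure on the way out using the captured row lengths.
def _example_ragged_round_trip(inputs):
  dense_inputs, nested_row_lengths = convert_inputs_if_ragged(inputs)
  dense_outputs = dense_inputs  # stand-in for e.g. a layer call on the dense tensor
  return maybe_convert_to_ragged(
      nested_row_lengths is not None, dense_outputs, nested_row_lengths)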
class ContextValueCache(weakref.WeakKeyDictionary):
"""Container that caches (possibly tensor) values based on the context.
This class is similar to defaultdict, where values may be produced by the
default factory specified during initialization. This class also has a default
  value for the key (when key is `None`) -- the key is set to the current
graph or eager context. The default factories for key and value are only used
in `__getitem__` and `setdefault`. The `.get()` behavior remains the same.
This object will return the value of the current graph or closest parent graph
if the current graph is a function. This is to reflect the fact that if a
tensor is created in eager/graph, child functions may capture that tensor.
The default factory method may accept keyword arguments (unlike defaultdict,
which only accepts callables with 0 arguments). To pass keyword arguments to
`default_factory`, use the `setdefault` method instead of `__getitem__`.
An example of how this class can be used in different contexts:
```
cache = ContextValueCache(int)
# Eager mode
cache[None] += 2
cache[None] += 4
assert cache[None] == 6
# Graph mode
with tf.Graph().as_default() as g:
cache[None] += 5
cache[g] += 3
assert cache[g] == 8
```
Example of a default factory with arguments:
```
cache = ContextValueCache(lambda x: x + 1)
g = tf.get_default_graph()
# Example with keyword argument.
value = cache.setdefault(key=g, kwargs={'x': 3})
assert cache[g] == 4
```
"""
def __init__(self, default_factory):
self.default_factory = default_factory
weakref.WeakKeyDictionary.__init__(self)
def _key(self):
if context.executing_eagerly():
return _DUMMY_EAGER_GRAPH.key
else:
return ops.get_default_graph()
def _get_parent_graph(self, graph):
"""Returns the parent graph or dummy eager object."""
# TODO(b/149317164): Currently FuncGraphs use ops.get_default_graph() as the
# outer graph. This results in outer_graph always being a Graph,
# even in eager mode (get_default_graph will create a new Graph if there
# isn't a default graph). Because of this bug, we have to specially set the
# key when eager execution is enabled.
parent_graph = graph.outer_graph
if (not isinstance(parent_graph, func_graph.FuncGraph) and
ops.executing_eagerly_outside_functions()):
return _DUMMY_EAGER_GRAPH.key
return parent_graph
def _get_recursive(self, key):
"""Gets the value at key or the closest parent graph."""
value = self.get(key)
if value is not None:
return value
# Since FuncGraphs are able to capture tensors and variables from their
# parent graphs, recursively search to see if there is a value stored for
# one of the parent graphs.
if isinstance(key, func_graph.FuncGraph):
return self._get_recursive(self._get_parent_graph(key))
return None
def __getitem__(self, key):
"""Gets the value at key (or current context), or sets default value.
Args:
key: May be `None` or `Graph`object. When `None`, the key is set to the
current context.
Returns:
Either the cached or default value.
"""
if key is None:
key = self._key()
value = self._get_recursive(key)
if value is None:
value = self[key] = self.default_factory() # pylint:disable=not-callable
return value
def setdefault(self, key=None, default=None, kwargs=None):
"""Sets the default value if key is not in dict, and returns the value."""
if key is None:
key = self._key()
kwargs = kwargs or {}
if default is None and key not in self:
default = self.default_factory(**kwargs)
return weakref.WeakKeyDictionary.setdefault(self, key, default)
# This dictionary holds a mapping {graph: learning_phase}. In eager mode, a
# dummy object is used.
# A learning phase is a bool tensor used to run Keras models in
# either train mode (learning_phase == 1) or test mode (learning_phase == 0).
_GRAPH_LEARNING_PHASES = ContextValueCache(_default_learning_phase)
# This dictionary holds a mapping between a graph and variables to initialize
# in the graph.
_GRAPH_VARIABLES = ContextValueCache(object_identity.ObjectIdentityWeakSet)
# This dictionary holds a mapping between a graph and TF optimizers created in
# the graph.
_GRAPH_TF_OPTIMIZERS = ContextValueCache(object_identity.ObjectIdentityWeakSet)
| 31.561398 | 112 | 0.673438 |
9057cf294c414b3842f8a6e2f3171f784e30cffc | 2,828 | py | Python | MATH-2305-Final_Project-Fall-2020/functions.py | tranmike270/Math-2305-Final-Project | 2651800661c963b7124cea8395d0783dce923bed | [
"Unlicense"
] | null | null | null | MATH-2305-Final_Project-Fall-2020/functions.py | tranmike270/Math-2305-Final-Project | 2651800661c963b7124cea8395d0783dce923bed | [
"Unlicense"
] | null | null | null | MATH-2305-Final_Project-Fall-2020/functions.py | tranmike270/Math-2305-Final-Project | 2651800661c963b7124cea8395d0783dce923bed | [
"Unlicense"
] | null | null | null | import networkx as nx
def V(G):
"""
    Returns the set of vertices of a graph
Parameters
----------
G = A networkx graph.
Returns
-------
set of vertices belonging to graph 'G'
"""
return set(G.nodes())
def E(G):
"""
Returns a set of edges on a graph
Parameters
----------
G = A networkx graph.
Returns
-------
    set of edges belonging to graph 'G'
"""
return set(G.edges())
def prims_initialize(graph, v):
"""
Returns a subgraph while making sure the vertex is found on the graph.
Parameters
----------
graph = A networkx graph.
v = int value of a vertex
Returns
-------
    T, a new subgraph containing only the starting vertex 'v', or the string
    'Error vertex not found' if 'v' is not in 'graph'.
"""
if v not in V(graph):
return 'Error vertex not found'
else:
T = nx.Graph()
T.add_node(v)
return T
def cost(graph,e):
"""
Return the cost of an edge on a graph.
Parameters
----------
    graph = A networkx graph.
e = An edge on graph.
Returns
-------
The weight of an edge 'e' on a graph 'graph'.
"""
return graph.edges[e]['weight']
def is_spanning(graph, subgraph):
"""
Return True or False by passing graph and subgraph through function V
    to check if the subgraph uses all vertices of the original graph.
Parameters
----------
graph = A networkx graph.
subgraph = A networkx subgraph of 'graph'.
Returns
-------
True if the subgraph is spanning.
False if the subgraph is not spanning.
"""
return V(graph) == V(subgraph)
def incident_edges(G, T):
"""
Returns the valid incident edges.
An incident edge is an edge that is adjacent to the node that is
    being evaluated. An edge is valid when at least one of its endpoints is not
    already in the set containing tree nodes.
Parameters
----------
G = A networkx graph.
T = A networkx subgraph of 'graph'.
Returns
-------
A set of valid incident edges.
"""
return [e for e in list(G.edges(V(T)))
if e[0] not in V(T) or e[1] not in V(T)]
def min_prims_edge(G, T):
"""
    Return the minimum cost incident edge from the set created by the incident_edges
function. Uses the cost function to determine the minimum cost edge.
Parameters
----------
G = A networkx graph.
T = A networkx subgraph of 'graph'.
Returns
-------
    The minimum cost edge incident to the tree 'T' in graph 'G'.
"""
possible_e = incident_edges(G,T)
min_e = possible_e[0]
for e in possible_e:
if cost(G,e) < cost(G,min_e):
min_e = e
return min_e
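def prims_mst(graph, v):
    """
    A sketch combining the helpers above into a complete run of Prim's algorithm
    (illustrative; assumes 'graph' is connected and every edge has a 'weight').
    Parameters
    ----------
    graph = A networkx graph.
    v = int value of the starting vertex
    Returns
    -------
    T, a minimum spanning tree of 'graph' grown from 'v'.
    """
    T = prims_initialize(graph, v)
    if not isinstance(T, nx.Graph):
        return T  # propagate the 'Error vertex not found' message
    while not is_spanning(graph, T):
        e = min_prims_edge(graph, T)
        T.add_edge(*e, weight=cost(graph, e))
    return T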
| 20.642336 | 82 | 0.547383 |
60be2b0daf1c3f10fc3761354835fdcad89075ea | 19,091 | py | Python | lib/visual_envs.py | benlansdell/Meta-RL | c7320bb3ffa8e86ad5d672d10267cc7f4813bdcb | [
"MIT"
] | 6 | 2020-03-31T02:10:25.000Z | 2022-01-06T19:27:16.000Z | lib/visual_envs.py | benlansdell/Meta-RL | c7320bb3ffa8e86ad5d672d10267cc7f4813bdcb | [
"MIT"
] | null | null | null | lib/visual_envs.py | benlansdell/Meta-RL | c7320bb3ffa8e86ad5d672d10267cc7f4813bdcb | [
"MIT"
] | null | null | null | import numpy as np
import random
import itertools
import scipy.ndimage
import scipy.misc
import matplotlib.pyplot as plt
from numpy.random import rand
from scipy.ndimage import gaussian_filter
class gameOb():
def __init__(self,coordinates,size,color,reward,name):
self.x = coordinates[0]
self.y = coordinates[1]
self.size = size
self.color = color
self.reward = reward
self.name = name
#To implement:
# VisualIntEnv
class VisualIntEnv():
def __init__(self, size, delay = 1, p1 = 0.1, p2 = 0.01, p3 = 0.01, int_p2 = 0.1, int_p3 = 0.1, max_steps = 20, obs_steps= 19, chain_prob = 0.5):
self.sizeX = size
self.sizeY = size
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.int_p2 = int_p2
self.int_p3 = int_p3
self.alpha = 0.5
self.max_steps = max_steps
self.obs_steps = obs_steps
self.actions = 2
self.N = 3 #Number of objects
self.bg = np.zeros([size,size])
self.chain_prob = chain_prob
self.reset()
def reset(self):
#Choose the topology randomly with each reset
self.is_chain = rand() > self.chain_prob
self.timestep = 0
self.state = np.zeros(self.N+1)
rendered_state, rendered_state_big = self.renderEnv()
self.xhistory = np.zeros((self.max_steps, self.N+1))
return rendered_state, rendered_state_big
def renderEnv(self):
s = np.zeros([self.sizeY,self.sizeX])
#For each object... find a location and render its state
for idx in range(self.N):
obj_x = int((idx * self.sizeX)/float(self.N))
obj_y = obj_x
s[obj_y, obj_x] = self.state[idx]
#Plot response indicator
s_i = np.zeros([self.sizeY,self.sizeX])
obj_x = int(self.sizeX/float(self.N))
obj_y = 0
s_i[obj_y, obj_x] = self.state[-1]
a = gaussian_filter(s, sigma = 1, mode = 'wrap')
a = np.tile(a[:,:,None], (1,1,3))
#Add response indicator pixels to red channel
a[:,:,0] += s_i
a_big = scipy.misc.imresize(a, [32,32,3], interp='nearest')
return a, a_big
def step(self,action):
#Choose spontaneous activity
y1 = rand() < self.p1
y2 = rand() < self.p2
y3 = rand() < self.p3
#Introduce interventions that help distinguish the two causal graphs
z2 = (action == 0)
z3 = (action == 1)
##########
#Dynamics#
##########
#Choose if node A is active
x1 = y1
#Choose if node B is active
x2 = y2 + (1-y2)*self.xhistory[max(0, self.timestep - 1), 0]
if z2: #Overwrite if intervening
x2 = 1
#Depending on topology, choose if node C is active
if self.is_chain:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 1), 1]
else:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 2), 0]
if z3: #Overwrite if intervening
x3 = 1
y1 = 1. if self.timestep >= self.obs_steps else 0.
state = np.array([x1, x2, x3, y1])
#Decay
self.state = np.minimum(1, (1-self.alpha)*self.state + self.alpha*state)
self.xhistory[self.timestep, :] = self.state
self.timestep += 1
if self.timestep >= self.max_steps:
done = True
else:
done = False
#If in the 'action phase', then the action is meant to indicate which topology it thinks is correct
if self.timestep >= self.obs_steps:
reward = float(action == self.is_chain)
else:
reward = 0.0
#Render states to agent to see....
rendered_state, rendered_state_big = self.renderEnv()
return rendered_state, rendered_state_big, reward, done
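# Minimal rollout sketch for VisualIntEnv (illustrative only; actions are chosen at
# random here, and sizes/episode lengths follow the constructor defaults above).
def example_random_rollout(size=8):
    env = VisualIntEnv(size)
    obs, obs_big = env.reset()
    total_reward, done = 0.0, False
    while not done:
        # 0 or 1: an intervention during observation, the topology guess at the end.
        action = np.random.randint(env.actions)
        obs, obs_big, reward, done = env.step(action)
        total_reward += reward
    return total_reward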
class VisualObsIntEnv():
def __init__(self, size, delay = 1, p1 = 0.1, p2 = 0.01, p3 = 0.01, int_p2 = 0.1, int_p3 = 0.1, max_steps = 20, obs_steps= 20, chain_prob = 0.5):
self.sizeX = size
self.sizeY = size
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.int_p2 = int_p2
self.int_p3 = int_p3
self.alpha = 0.5
self.max_steps = max_steps
self.obs_steps = obs_steps
self.actions = 2
self.N = 3 #Number of objects
self.bg = np.zeros([size,size])
self.chain_prob = chain_prob
a,a_big = self.reset()
plt.imshow(a_big,interpolation="nearest")
def reset(self):
#Choose the topology randomly with each reset
self.is_chain = rand() > self.chain_prob
self.timestep = 0
self.state = np.zeros(self.N+2)
rendered_state, rendered_state_big = self.renderEnv()
self.xhistory = np.zeros((self.max_steps, self.N+2))
return rendered_state, rendered_state_big
def renderEnv(self):
s = np.zeros([self.sizeY,self.sizeX])
#For each object... find a location and render its state
for idx in range(self.N):
obj_x = int((idx * self.sizeX)/float(self.N))
obj_y = obj_x
s[obj_y, obj_x] = self.state[idx]
#For the intervention objects... plot those too!
s_i = np.zeros([self.sizeY,self.sizeX])
for idx in range(2):
obj_x = int(((idx+1) * self.sizeX)/float(self.N))
obj_y = int((idx * self.sizeX)/float(self.N))
s_i[obj_y, obj_x] = self.state[self.N+idx]
a = gaussian_filter(s, sigma = 1, mode = 'wrap')
a = np.tile(a[:,:,None], (1,1,3))
#Add intervention indicator pixels to red channel
a[:,:,0] += s_i
a_big = scipy.misc.imresize(a, [32,32,3], interp='nearest')
return a, a_big
def step(self,action):
#Choose spontaneous activity
y1 = rand() < self.p1
y2 = rand() < self.p2
y3 = rand() < self.p3
#Introduce interventions that help distinguish the two causal graphs
z2 = rand() < self.int_p2
z3 = rand() < self.int_p3
##########
#Dynamics#
##########
#Choose if node A is active
x1 = y1
#Choose if node B is active
x2 = y2 + (1-y2)*self.xhistory[max(0, self.timestep - 1), 0]
if z2: #Overwrite if intervening
x2 = 1
#Depending on topology, choose if node C is active
if self.is_chain:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 1), 1]
else:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 2), 0]
if z3: #Overwrite if intervening
x3 = 1
state = np.array([x1, x2, x3, z2, z3])
#Decay
self.state = np.minimum(1, (1-self.alpha)*self.state + self.alpha*state)
self.xhistory[self.timestep, :] = self.state
self.timestep += 1
if self.timestep >= self.max_steps:
done = True
else:
done = False
#If in the 'action phase', then the action is meant to indicate which topology it thinks is correct
if self.timestep >= self.obs_steps:
reward = float(action == self.is_chain)
else:
reward = 0.0
#Render states to agent to see....
rendered_state, rendered_state_big = self.renderEnv()
return rendered_state, rendered_state_big, reward, done
class VisualObsEnv():
def __init__(self, size, delay = 1, p1 = 0.1, p2 = 0.01, p3 = 0.01, int_p2 = 0.1, int_p3 = 0.1, max_steps = 20, obs_steps= 20, chain_prob = 0.5):
self.sizeX = size
self.sizeY = size
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.int_p2 = int_p2
self.int_p3 = int_p3
self.alpha = 0.5
self.max_steps = max_steps
self.obs_steps = obs_steps
self.actions = 2
self.N = 3 #Number of objects
self.bg = np.zeros([size,size])
self.chain_prob = chain_prob
a,a_big = self.reset()
plt.imshow(a_big,interpolation="nearest")
def reset(self):
#Choose the topology randomly with each reset
self.is_chain = rand() > self.chain_prob
self.timestep = 0
self.state = np.zeros(self.N)
rendered_state, rendered_state_big = self.renderEnv()
self.xhistory = np.zeros((self.max_steps, self.N))
return rendered_state, rendered_state_big
def renderEnv(self):
s = np.zeros([self.sizeY,self.sizeX])
#For each object... find a location and render its state
for idx in range(self.N):
obj_x = int((idx * self.sizeX)/float(self.N))
obj_y = obj_x
s[obj_y, obj_x] = self.state[idx]
a = gaussian_filter(s, sigma = 1, mode = 'wrap')
a = np.tile(a[:,:,None], (1,1,3))
a_big = scipy.misc.imresize(a, [32,32,3], interp='nearest')
return a, a_big
def step(self,action):
#Choose spontaneous activity
y1 = rand() < self.p1
y2 = rand() < self.p2
y3 = rand() < self.p3
#Introduce interventions that help distinguish the two causal graphs
z2 = rand() < self.int_p2
z3 = rand() < self.int_p3
##########
#Dynamics#
##########
#Choose if node A is active
x1 = y1
#Choose if node B is active
x2 = y2 + (1-y2)*self.xhistory[max(0, self.timestep - 1), 0]
if z2: #Overwrite if intervening
x2 = 1
#Depending on topology, choose if node C is active
if self.is_chain:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 1), 1]
else:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 2), 0]
if z3: #Overwrite if intervening
x3 = 1
state = np.array([x1, x2, x3])
#Decay
self.state = np.minimum(1, (1-self.alpha)*self.state + self.alpha*state)
self.xhistory[self.timestep, :] = self.state
self.timestep += 1
if self.timestep >= self.max_steps:
done = True
else:
done = False
#If in the 'action phase', then the action is meant to indicate which topology it thinks is correct
if self.timestep >= self.obs_steps:
reward = float(action == self.is_chain)
else:
reward = 0.0
#Render states to agent to see....
rendered_state, rendered_state_big = self.renderEnv()
return rendered_state, rendered_state_big, reward, done
#Here not enough information is provided to solve the problem
#Three objects.
class VisualConfoundedEnv():
def __init__(self, size, delay = 1, p1 = 0.1, p2 = 0.01, p3 = 0.01, max_steps = 20, obs_steps= 20, chain_prob = 0.5):
self.sizeX = size
self.sizeY = size
self.p1 = p1
self.p2 = p2
self.p3 = p3
self.alpha = 0.5
self.max_steps = max_steps
self.obs_steps = obs_steps
self.actions = 2
self.N = 3 #Number of objects
self.bg = np.zeros([size,size])
self.chain_prob = chain_prob
a,a_big = self.reset()
plt.imshow(a_big,interpolation="nearest")
def reset(self):
#Choose the topology randomly with each reset
self.is_chain = rand() > self.chain_prob
self.timestep = 0
self.state = np.zeros(self.N)
rendered_state, rendered_state_big = self.renderEnv()
self.xhistory = np.zeros((self.max_steps, self.N))
return rendered_state, rendered_state_big
def renderEnv(self):
s = np.zeros([self.sizeY,self.sizeX])
#For each object... find a location and render its state
for idx in range(self.N):
obj_x = int((idx * self.sizeX)/float(self.N))
obj_y = obj_x
s[obj_y, obj_x] = self.state[idx]
a = gaussian_filter(s, sigma = 1, mode = 'wrap')
a = np.tile(a[:,:,None], (1,1,3))
a_big = scipy.misc.imresize(a, [32,32,3], interp='nearest')
return a, a_big
def step(self,action):
#Choose spontaneous activity
y1 = rand() < self.p1
y2 = rand() < self.p2
y3 = rand() < self.p3
##########
#Dynamics#
##########
#Choose if node A is active
x1 = y1
#Choose if node B is active
x2 = y2 + (1-y2)*self.xhistory[max(0, self.timestep - 1), 0]
#Depending on topology, choose if node C is active
if self.is_chain:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 1), 1]
else:
x3 = y3 + (1-y3)*self.xhistory[max(0, self.timestep - 2), 0]
state = np.array([x1, x2, x3])
#Decay
self.state = np.minimum(1, (1-self.alpha)*self.state + self.alpha*state)
self.xhistory[self.timestep, :] = self.state
self.timestep += 1
if self.timestep >= self.max_steps:
done = True
else:
done = False
#If in the 'action phase', then the action is meant to indicate which topology it thinks is correct
if self.timestep >= self.obs_steps:
reward = float(action == self.is_chain)
else:
reward = 0.0
#Render states to agent to see....
rendered_state, rendered_state_big = self.renderEnv()
return rendered_state, rendered_state_big, reward, done
class gameEnv():
def __init__(self,partial,size,goal_color):
self.sizeX = size
self.sizeY = size
self.actions = 4
self.objects = []
self.partial = partial
self.bg = np.zeros([size,size])
a,a_big = self.reset(goal_color)
plt.imshow(a_big,interpolation="nearest")
def getFeatures(self):
return np.array([self.objects[0].x,self.objects[0].y]) / float(self.sizeX)
def reset(self,goal_color):
self.objects = []
self.goal_color = goal_color
self.other_color = [1 - a for a in self.goal_color]
self.orientation = 0
self.hero = gameOb(self.newPosition(0),1,[0,0,1],None,'hero')
self.objects.append(self.hero)
for i in range(self.sizeX-1):
bug = gameOb(self.newPosition(0),1,self.goal_color,1,'goal')
self.objects.append(bug)
for i in range(self.sizeX-1):
hole = gameOb(self.newPosition(0),1,self.other_color,0,'fire')
self.objects.append(hole)
state,s_big = self.renderEnv()
self.state = state
return state,s_big
def moveChar(self,action):
# 0 - up, 1 - down, 2 - left, 3 - right, 4 - 90 counter-clockwise, 5 - 90 clockwise
hero = self.objects[0]
blockPositions = [[-1,-1]]
for ob in self.objects:
if ob.name == 'block': blockPositions.append([ob.x,ob.y])
blockPositions = np.array(blockPositions)
heroX = hero.x
heroY = hero.y
penalize = 0.
if action < 4 :
if self.orientation == 0:
direction = action
if self.orientation == 1:
if action == 0: direction = 1
elif action == 1: direction = 0
elif action == 2: direction = 3
elif action == 3: direction = 2
if self.orientation == 2:
if action == 0: direction = 3
elif action == 1: direction = 2
elif action == 2: direction = 0
elif action == 3: direction = 1
if self.orientation == 3:
if action == 0: direction = 2
elif action == 1: direction = 3
elif action == 2: direction = 1
elif action == 3: direction = 0
if direction == 0 and hero.y >= 1 and [hero.x,hero.y - 1] not in blockPositions.tolist():
hero.y -= 1
if direction == 1 and hero.y <= self.sizeY-2 and [hero.x,hero.y + 1] not in blockPositions.tolist():
hero.y += 1
if direction == 2 and hero.x >= 1 and [hero.x - 1,hero.y] not in blockPositions.tolist():
hero.x -= 1
if direction == 3 and hero.x <= self.sizeX-2 and [hero.x + 1,hero.y] not in blockPositions.tolist():
hero.x += 1
if hero.x == heroX and hero.y == heroY:
penalize = 0.0
self.objects[0] = hero
return penalize
def newPosition(self,sparcity):
iterables = [ range(self.sizeX), range(self.sizeY)]
points = []
for t in itertools.product(*iterables):
points.append(t)
for objectA in self.objects:
if (objectA.x,objectA.y) in points: points.remove((objectA.x,objectA.y))
location = np.random.choice(range(len(points)),replace=False)
return points[location]
def checkGoal(self):
hero = self.objects[0]
others = self.objects[1:]
ended = False
for other in others:
if hero.x == other.x and hero.y == other.y and hero != other:
self.objects.remove(other)
if other.reward == 1:
self.objects.append(gameOb(self.newPosition(0),1,self.goal_color,1,'goal'))
return other.reward,False
else:
self.objects.append(gameOb(self.newPosition(0),1,self.other_color,0,'fire'))
return other.reward,False
if ended == False:
return 0.0,False
def renderEnv(self):
if self.partial == True:
padding = 2
a = np.ones([self.sizeY+(padding*2),self.sizeX+(padding*2),3])
a[padding:-padding,padding:-padding,:] = 0
a[padding:-padding,padding:-padding,:] += np.dstack([self.bg,self.bg,self.bg])
else:
a = np.zeros([self.sizeY,self.sizeX,3])
padding = 0
a += np.dstack([self.bg,self.bg,self.bg])
hero = self.objects[0]
for item in self.objects:
a[item.y+padding:item.y+item.size+padding,item.x+padding:item.x+item.size+padding,:] = item.color
#if item.name == 'hero':
# hero = item
if self.partial == True:
a = a[(hero.y):(hero.y+(padding*2)+hero.size),(hero.x):(hero.x+(padding*2)+hero.size),:]
a_big = scipy.misc.imresize(a,[32,32,3],interp='nearest')
return a,a_big
def step(self,action):
penalty = self.moveChar(action)
reward,done = self.checkGoal()
state,s_big = self.renderEnv()
        if reward is None:
print(done)
print(reward)
print(penalty)
return state,(reward+penalty),done
else:
goal = None
for ob in self.objects:
if ob.name == 'goal':
goal = ob
return state,s_big,(reward+penalty),done,[self.objects[0].y,self.objects[0].x],[goal.y,goal.x]
| 36.7842 | 149 | 0.550993 |
07f2803100ebfd53d93738353ac52652bf9e72ae | 1,735 | py | Python | tests/test_class_oelint_file_underscores.py | HerrMuellerluedenscheid/oelint-adv | 90ad0a9e385d863af85869f06750aa5d2440e986 | [
"BSD-2-Clause"
] | 22 | 2019-06-10T00:40:07.000Z | 2022-01-18T19:59:47.000Z | tests/test_class_oelint_file_underscores.py | HerrMuellerluedenscheid/oelint-adv | 90ad0a9e385d863af85869f06750aa5d2440e986 | [
"BSD-2-Clause"
] | 274 | 2019-03-07T06:00:27.000Z | 2022-03-27T10:22:10.000Z | tests/test_class_oelint_file_underscores.py | HerrMuellerluedenscheid/oelint-adv | 90ad0a9e385d863af85869f06750aa5d2440e986 | [
"BSD-2-Clause"
] | 17 | 2019-08-24T23:04:39.000Z | 2021-11-02T19:18:19.000Z | import os
import sys
import pytest
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from base import TestBaseClass
class TestClassOelintFileUnderscores(TestBaseClass):
@pytest.mark.parametrize('id', ['oelint.file.underscores'])
@pytest.mark.parametrize('occurrence', [1])
@pytest.mark.parametrize('input',
[
{
'oelint_adv-test.bb':
'''
VAR = "1"
'''
},
{
'oelintadvtest.bb':
'''
VAR = "1"
'''
},
{
'oelint_adv_test.bb':
'''
VAR = "1"
'''
}
],
)
def test_bad(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
@pytest.mark.parametrize('id', ['oelint.file.underscores'])
@pytest.mark.parametrize('occurrence', [0])
@pytest.mark.parametrize('input',
[
{
'oelint_adv_test.bb':
'''
inherit core-image
'''
},
{
'oelint_adv_test.bb':
'''
inherit image
'''
},
{
'oelint_adv_test.bb':
'''
IMAGE_INSTALL += "foo"
'''
},
{
'oelint-adv_1.2.3.bb':
'''
VAR = "a"
'''
},
{
'oelint-adv_git.bb':
'''
VAR = "a"
'''
},
],
)
def test_good(self, input, id, occurrence):
self.check_for_id(self._create_args(input), id, occurrence)
| 22.24359 | 67 | 0.410375 |
918cbf805cc990757fe75d5c5c6f15bf932a44c7 | 1,061 | py | Python | guisetup.py | ahmadbeirkdar/Simple-Library-Software | ecf63cad501aa5a6ae7b4a624db22e1c92f92bb9 | [
"MIT"
] | 2 | 2020-03-27T13:56:26.000Z | 2021-04-21T04:03:49.000Z | guisetup.py | ahmadbeirkdar/Library-Management-Software | ecf63cad501aa5a6ae7b4a624db22e1c92f92bb9 | [
"MIT"
] | null | null | null | guisetup.py | ahmadbeirkdar/Library-Management-Software | ecf63cad501aa5a6ae7b4a624db22e1c92f92bb9 | [
"MIT"
] | null | null | null | import sys
sys.path.append('gui')
from mainwindow import *
from userdialog import *
from classes import *
from datafunc import *
from bookadd import *
filename_book = "books.csv"
filename_person = "users.csv"
filename_data = "data.csv"
filename = "data.csv"
duetime = 14
a = parse_data(filename_book, filename_person, filename_data)
a.parse_books()
a.parse_person()
a.parse_data()
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow(filename, duetime, a)
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
#NOTE:
# Col 1 ID, Col 2 Title, Col 3 Author, Col 4 ISBN, Col 5 Location
#TODO:
# Implement bring back books -DONE
# Book add - DONE
# Implement book csv standared - Done
# csv append - DONE
# Loging system - DONE
# Implement settings
# default thingss
# Implement books due
# Email
# Due books list
# Implement Dues system
# csv
# Refresh main window for added books and data
# Add user functionality
| 20.018868 | 69 | 0.68426 |
be4760a2f55d0f90bc5eb3f92fe63439f54a616b | 233,293 | py | Python | tests/app/main/test_frameworks.py | alphagov-mirror/digitalmarketplace-supplier-frontend | 349af2ec867f784c524a6a1c42b069f6d302e513 | [
"MIT"
] | null | null | null | tests/app/main/test_frameworks.py | alphagov-mirror/digitalmarketplace-supplier-frontend | 349af2ec867f784c524a6a1c42b069f6d302e513 | [
"MIT"
] | null | null | null | tests/app/main/test_frameworks.py | alphagov-mirror/digitalmarketplace-supplier-frontend | 349af2ec867f784c524a6a1c42b069f6d302e513 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import mock
from collections import OrderedDict
from datetime import datetime
from io import BytesIO
from itertools import chain
from urllib.parse import urljoin
from freezegun import freeze_time
from lxml import html
import pytest
from werkzeug.datastructures import MultiDict
from dmapiclient import (
APIError,
HTTPError
)
from dmapiclient.audit import AuditTypes
from dmcontent.errors import ContentNotFoundError
from dmtestutils.api_model_stubs import FrameworkStub, SupplierStub
from dmtestutils.fixtures import valid_pdf_bytes
from dmutils.email.exceptions import EmailError
from dmutils.s3 import S3ResponseError
from app.main.forms.frameworks import ReuseDeclarationForm
from ..helpers import (
BaseApplicationTest,
MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin,
FULL_G7_SUBMISSION,
valid_g9_declaration_base,
assert_args_and_raise,
assert_args_and_return,
)
def _return_fake_s3_file_dict(directory, filename, ext, last_modified=None, size=None):
return {
'path': '{}{}.{}'.format(directory, filename, ext),
'filename': filename,
'ext': ext,
'last_modified': last_modified or '2015-08-17T14:00:00.000Z',
'size': size if size is not None else 1
}
def get_g_cloud_8():
return BaseApplicationTest.framework(
status='standstill',
name='G-Cloud 8',
slug='g-cloud-8',
framework_agreement_version='v1.0'
)
def _extract_guidance_links(doc):
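    # Walks the rendered dashboard page and returns an OrderedDict keyed by each
    # guidance section's h2 heading, mapping to (link text, href, date text,
    # datetime attribute) tuples for each document or communication link found.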
return OrderedDict(
(
section_li.xpath("normalize-space(string(.//h2))"),
tuple(
(
item_li.xpath("normalize-space(string(.//a))") or None,
item_li.xpath("string(.//a/@href)") or None,
item_li.xpath(
(
"normalize-space(string(.//time"
" | "
"./following-sibling::p[@class='dm-attachment__metadata']//time))"
)
) or None,
item_li.xpath(
(
"string(.//time/@datetime"
" | "
"./following-sibling::p[@class='dm-attachment__metadata']//time/@datetime)"
)
) or None,
)
for item_li in section_li.xpath(".//p[.//a] | .//h3[.//a]")
),
)
for section_li in doc.xpath("//main//*[./h2][.//p//a | .//section[@class='dm-attachment']//a]")
)
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboard(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_framework_dashboard_shows_for_pending_if_declaration_exists(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath("//h1[normalize-space(string())=$b]", b="Your G-Cloud 7 application")) == 1
def test_framework_dashboard_shows_for_live_if_declaration_exists(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='live')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath("//h1[normalize-space(string())=$b]", b="G-Cloud 7 documents")) == 1
def test_does_not_show_for_live_if_no_declaration(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='live')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(declaration=None)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 404
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_email_sent_when_interest_registered_in_framework(self, mock_dmnotifyclient_class, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
self.data_api_client.find_users_iter.return_value = [
{'emailAddress': 'email1', 'active': True},
{'emailAddress': 'email2', 'active': True},
{'emailAddress': 'email3', 'active': False}
]
mock_dmnotifyclient_instance = mock_dmnotifyclient_class.return_value
mock_dmnotifyclient_instance.templates = {'framework-application-started': '123456789'}
res = self.client.post("/suppliers/frameworks/g-cloud-7")
self.data_api_client.register_framework_interest.assert_called_once_with(
1234,
"g-cloud-7",
"email@email.com"
)
assert res.status_code == 200
assert mock_dmnotifyclient_instance.send_email.call_count == 2
assert mock_dmnotifyclient_instance.send_email.call_args[1].get('template_name_or_id') == '123456789'
def test_interest_not_registered_in_framework_on_get(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/digital-outcomes-and-specialists")
assert res.status_code == 200
assert self.data_api_client.register_framework_interest.called is False
def test_interest_set_but_no_declaration(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_framework_interest.return_value = {'frameworks': ['g-cloud-7']}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(declaration=None)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
def test_shows_closed_message_if_pending_and_no_application_done(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_framework_interest.return_value = {'frameworks': ['g-cloud-7']}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'not-submitted'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
assert "You didn't submit an application." in heading[0].xpath('../p[1]/text()')[0]
def test_shows_closed_message_if_pending_and_application(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_framework_interest.return_value = {'frameworks': ['g-cloud-7']}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
lede = doc.xpath('//div[@class="summary-item-lede"]')
expected_string = "You made your supplier declaration and submitted 1 service for consideration."
assert (expected_string in lede[0].xpath('./p[1]/text()')[0])
assert "We’ll let you know the result of your application by " in lede[0].xpath('./p[2]/text()')[0]
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboardOpenApplications(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_declaration_status_when_complete_for_open_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//main//strong[@id="dm-declaration-done"][contains(text(), "Done")]')) == 1
def test_declaration_status_when_started_for_open_framework(self, s3):
self.login()
submission = FULL_G7_SUBMISSION.copy()
# User has not yet submitted page 3 of the declaration
del submission['SQ2-1abcd']
del submission['SQ2-1e']
del submission['SQ2-1f']
del submission['SQ2-1ghijklmn']
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration=submission, status='started')
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//main//strong[@id="dm-declaration-inprogress"][contains(text(), "In progress")]')) == 1
def test_declaration_status_when_company_details_not_complete_for_open_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.side_effect = APIError(mock.Mock(status_code=404))
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//main//strong[@id="dm-declaration-cantstart"]')) == 1
def test_downloads_shown_for_open_framework(self, s3):
files = [
('updates/communications/', 'file 1', 'odt', '2015-01-01T14:00:00.000Z'),
('updates/clarifications/', 'file 2', 'odt', '2015-02-02T14:00:00.000Z'),
('', 'g-cloud-7-proposed-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
# superfluous file that shouldn't be shown
('', 'g-cloud-7-supplier-pack', 'zip', '2015-01-01T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Download the invitation to apply",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf",
None,
None,
),
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Legal documents", (
(
"Download the proposed framework agreement",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-framework-agreement.pdf",
"Wednesday 1 June 2016",
"2016-06-01T14:00:00.000Z",
),
(
"Download the proposed \u2018call-off\u2019 contract",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-call-off.pdf",
"Sunday 1 May 2016",
"2016-05-01T14:00:00.000Z",
),
)),
("Communications", (
(
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
"Monday 2 February 2015",
"2015-02-02T14:00:00.000Z",
),
)),
("Reporting", (
(
"Download the reporting template",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls",
None,
None,
),
)),
))
assert not any(
doc.xpath("//main//a[contains(@href, $href_part)]", href_part=href_part)
for href_part in (
"g-cloud-7-final-framework-agreement.pdf",
"g-cloud-7-supplier-pack.zip",
)
)
assert len(doc.xpath(
"//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015",
)) == 1
assert not doc.xpath(
"//main//table[normalize-space(string(./caption))=$b]",
b="Agreement details",
)
assert s3.return_value.list.call_args_list == [
mock.call("g-cloud-7/communications", load_timestamps=True)
]
def test_downloads_shown_open_framework_clarification_questions_closed(self, s3):
files = [
('updates/communications/', 'file 1', 'odt', '2015-01-01T14:00:00.000Z'),
('updates/clarifications/', 'file 2', 'odt', '2015-02-02T14:00:00.000Z'),
('', 'g-cloud-7-proposed-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
# superfluous file that shouldn't be shown
('', 'g-cloud-7-supplier-pack', 'zip', '2015-01-01T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status="open", clarification_questions_open=False
)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Download the invitation to apply",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf",
None,
None,
),
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Legal documents", (
(
"Download the proposed framework agreement",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-framework-agreement.pdf",
"Wednesday 1 June 2016",
"2016-06-01T14:00:00.000Z",
),
(
"Download the proposed \u2018call-off\u2019 contract",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-call-off.pdf",
"Sunday 1 May 2016",
"2016-05-01T14:00:00.000Z",
),
)),
("Communications", (
(
"View communications and clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
"Monday 2 February 2015",
"2015-02-02T14:00:00.000Z",
),
)),
("Reporting", (
(
"Download the reporting template",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls",
None,
None,
),
)),
))
assert not any(
doc.xpath("//main//a[contains(@href, $href_part)]", href_part=href_part)
for href_part
in ("g-cloud-7-final-framework-agreement.pdf", "g-cloud-7-supplier-pack.zip")
)
assert not doc.xpath("//main[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
assert s3.return_value.list.call_args_list == [
mock.call("g-cloud-7/communications", load_timestamps=True)
]
def test_final_agreement_download_shown_open_framework(self, s3):
files = [
('updates/communications/', 'file 1', 'odt', '2015-01-01T14:00:00.000Z'),
('updates/clarifications/', 'file 2', 'odt', '2015-02-02T14:00:00.000Z'),
('', 'g-cloud-7-proposed-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
('', 'g-cloud-7-final-framework-agreement', 'pdf', '2016-06-02T14:00:00.000Z'),
# present but should be overridden by final agreement file
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-11T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Download the invitation to apply",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf",
None,
None,
),
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Legal documents", (
(
"Download the framework agreement",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-final-framework-agreement.pdf",
"Thursday 2 June 2016",
"2016-06-02T14:00:00.000Z",
),
(
"Download the proposed \u2018call-off\u2019 contract",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-proposed-call-off.pdf",
"Sunday 1 May 2016",
"2016-05-01T14:00:00.000Z",
),
)),
("Communications", (
(
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
"Monday 2 February 2015",
"2015-02-02T14:00:00.000Z",
),
)),
("Reporting", (
(
"Download the reporting template",
"/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls",
None,
None,
),
)),
))
assert not any(
doc.xpath("//main//a[contains(@href, $href_part)]", href_part=href_part)
for href_part
in ("g-cloud-7-proposed-framework-agreement.pdf", "g-cloud-7-supplier-pack.zip")
)
assert len(
doc.xpath("//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
) == 1
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
def test_no_updates_open_framework(self, s3):
files = [
('', 'g-cloud-7-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-proposed-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert (
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
None,
None,
) in extracted_guidance_links["Communications"]
assert len(
doc.xpath("//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
) == 1
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
def test_no_files_exist_open_framework(self, s3):
s3.return_value.list.return_value = []
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("Guidance", (
(
"Read about how to apply",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Communications", (
(
"View communications and ask clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
None,
None,
),
)),
))
assert not any(
doc.xpath(
"//a[contains(@href, $href_part) or normalize-space(string())=$label]",
href_part=href_part,
label=label,
) for href_part, label in (
(
"g-cloud-7-invitation.pdf",
"Download the invitation to apply",
),
(
"g-cloud-7-proposed-framework-agreement.pdf",
"Download the proposed framework agreement",
),
(
"g-cloud-7-call-off.pdf",
"Download the proposed \u2018call-off\u2019 contract",
),
(
"g-cloud-7-reporting-template.xls",
"Download the reporting template",
),
(
"result-letter.pdf",
"Download your application result letter",
),
)
)
assert len(
doc.xpath("//main//p[contains(normalize-space(string()), $a)]",
a="until 5pm BST, Tuesday 22 September 2015")
) == 1
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
def test_returns_404_if_framework_does_not_exist(self, s3):
self.login()
self.data_api_client.get_framework.side_effect = APIError(mock.Mock(status_code=404))
res = self.client.get('/suppliers/frameworks/does-not-exist')
assert res.status_code == 404
def test_visit_to_framework_dashboard_saved_in_session_if_framework_open(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug="g-cloud-9", status="open")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
response = self.client.get("/suppliers/frameworks/g-cloud-9")
assert response.status_code == 200
with self.client.session_transaction() as session:
assert session["currently_applying_to"] == "g-cloud-9"
@pytest.mark.parametrize(
"framework_status",
["coming", "pending", "standstill", "live", "expired"]
)
def test_visit_to_framework_dashboard_not_saved_in_session_if_framework_not_open(self, s3, framework_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug="g-cloud-9", status=framework_status)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
self.client.get("/suppliers/frameworks/g-cloud-9")
with self.client.session_transaction() as session:
assert "currently_applying_to" not in session
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboardSuccessBanner(BaseApplicationTest):
"""Tests for the confidence banner on the declaration page."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.data_api_client.get_framework.return_value = self.framework(status='open')
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_success_banner_on_page_for_open_framework(self, _):
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'foo'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
status='complete',
application_company_details_confirmed=True,
)
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=True).single_result_response()
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
document = html.fromstring(res.get_data(as_text=True))
alert_banner = document.xpath('//div[@class="dm-alert dm-alert--success"]')
assert len(alert_banner) == 1
assert alert_banner[0].xpath(
"//h2[contains(normalize-space(string()), $t)]",
t="Your application is complete and will be submitted automatically.",
)
assert alert_banner[0].xpath(
"//div[contains(normalize-space(string()), $t)]",
t="You can change it at any time before the deadline."
)
# Check GA custom dimension values
assert len(document.xpath("//meta[@data-id='29' and @data-value='application_confirmed']")) == 1
def test_success_banner_with_unsubmitted_drafts_shows_different_message(self, _):
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'foo'},
{'serviceName': 'A service', 'status': 'not-submitted', 'lotSlug': 'foo'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
status='complete',
application_company_details_confirmed=True,
)
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=True).single_result_response()
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
document = html.fromstring(res.get_data(as_text=True))
alert_banner = document.xpath('//div[@class="dm-alert dm-alert--success"]')
assert len(alert_banner) == 1
assert alert_banner[0].xpath(
"//h2[contains(normalize-space(string()), $t)]",
t="Your application is complete and will be submitted automatically.",
)
assert alert_banner[0].xpath(
"//div[contains(normalize-space(string()), $t)]",
t="You still have 1 unsubmitted draft service. "
"You can edit or remove draft services at any time before the deadline.",
)
# Check GA custom dimension values
assert len(document.xpath("//meta[@data-id='29' and @data-value='application_confirmed']")) == 1
@pytest.mark.parametrize(
('declaration_status', 'draft_service_status', 'details_confirmed', 'ga_value'),
(
('started', 'submitted', True, 'services_confirmed'),
('complete', 'not-submitted', True, 'declaration_confirmed'),
('unstarted', 'not-submitted', True, 'company_details_confirmed'),
('unstarted', 'not-submitted', False, 'application_started'),
)
)
def test_success_banner_not_on_page_if_sections_incomplete(
self, _, declaration_status, draft_service_status, details_confirmed, ga_value
):
"""Change value and assert that confidence banner is not displayed."""
supplier_data = SupplierStub(company_details_confirmed=details_confirmed).single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': draft_service_status, 'lotSlug': 'foo'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
status=declaration_status,
declaration={'status': declaration_status},
application_company_details_confirmed=supplier_data['suppliers']['companyDetailsConfirmed'],
)
self.data_api_client.get_supplier.return_value = supplier_data
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
document = html.fromstring(res.get_data(as_text=True))
# Alert banner should not be shown
alert_banner = document.xpath('//div[@class="dm-alert dm-alert--success"]')
assert len(alert_banner) == 0
assert 'Your application is complete and will be submitted automatically.' not in res.get_data(as_text=True)
# Check GA custom dimension values
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath("//meta[@data-id='29' and @data-value='{}']".format(ga_value))) == 1
@mock.patch('dmutils.s3.S3')
class TestFrameworksDashboardPendingStandstill(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@staticmethod
def _extract_signing_details_table_rows(doc):
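        # returns, for each row of the "Agreement details" table (or govuk summary list), a tuple of the
        # normalised text of its cells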
return tuple(
tuple(
td_th_dt_dd_elem.xpath("normalize-space(string())")
for td_th_dt_dd_elem in tr_elem.xpath("td|th|dt|dd")
)
for tr_elem in doc.xpath(
("//main//table[normalize-space(string(./caption))=$b]/tbody/tr"
"|"
"//main//dl/div[@class='govuk-summary-list__row']"),
b="Agreement details",
)
)
@property
def _boring_agreement_details(self):
# property so we always get a clean copy
return {
'frameworkAgreementVersion': 'v1.0',
'signerName': 'Martin Cunningham',
'signerRole': 'Foreman',
'uploaderUserId': 123,
'uploaderUserName': 'User',
'uploaderUserEmail': 'email@email.com',
}
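    # rendered as "Sunday 10 July 2016 at 10:20pm BST" in the expected signing details rows below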
_boring_agreement_returned_at = "2016-07-10T21:20:00.000000Z"
@property
def _boring_agreement_details_expected_table_results(self):
# property so we always get a clean copy
return (
(
'Person who signed',
'Martin Cunningham Foreman'
),
(
'Submitted by',
'User email@email.com Sunday 10 July 2016 at 10:20pm BST'
),
(
'Countersignature',
'Waiting for CCS to countersign'
),
)
def test_dashboard_pending_before_award_company_details_not_confirmed(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = []
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={}, application_company_details_confirmed=False
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $details_text)]",
details_text="You did not confirm your company details.",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $declaration_text)]",
declaration_text="You did not make a supplier declaration.",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $drafts_text)]",
drafts_text="You did not create any services.",
)
def test_dashboard_pending_before_award_services_but_no_declaration(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $declaration_text)]",
declaration_text="You did not make a supplier declaration",
)
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/submissions",
label="View draft services",
)
@pytest.mark.parametrize('declaration_status', ('started', 'complete'))
def test_dashboard_pending_before_award_with_services_and_declaration(self, s3, declaration_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={'status': declaration_status}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/declaration",
label="View your declaration",
)
if declaration_status == 'complete':
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $declaration_text)]",
declaration_text="You made your supplier declaration",
)
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/submissions",
label="View submitted services",
)
@pytest.mark.parametrize('declaration_status', ('started', 'complete'))
def test_dashboard_pending_before_award_with_declaration_incomplete_services(self, s3, declaration_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'not-submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={'status': declaration_status}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/declaration",
label="View your declaration",
)
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/submissions",
label="View draft services",
)
@pytest.mark.parametrize('declaration_status', ('started', 'complete'))
def test_dashboard_pending_before_award_with_declaration_no_services(self, s3, declaration_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = []
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
declaration={'status': declaration_status}
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/declaration",
label="View your declaration",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $drafts_text)]",
drafts_text="You did not create any services.",
)
def test_result_letter_is_shown_when_is_in_standstill(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Download your application result letter' in data
def test_result_letter_is_not_shown_when_not_in_standstill(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Download your application result letter' not in data
def test_result_letter_is_not_shown_when_no_application(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'not-submitted'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Download your application result letter' not in data
def test_link_to_unsigned_framework_agreement_is_shown_if_supplier_is_on_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Sign and return your framework agreement' in data
assert u'Download your countersigned framework agreement' not in data
def test_pending_success_message_is_explicit_if_supplier_is_on_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status='standstill', framework_agreement_version=None
)
        self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=True)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
data = res.get_data(as_text=True)
assert (
'Your application was successful.'
) in data
assert 'Download your application award letter (.pdf)' in data
assert 'This letter is a record of your successful G-Cloud 7 application.' in data
assert 'You made your supplier declaration and submitted 1 service.' not in data
assert 'Download your application result letter (.pdf)' not in data
assert 'This letter informs you if your G-Cloud 7 application has been successful.' not in data
def test_link_to_framework_agreement_is_not_shown_if_supplier_is_not_on_framework(self, s3):
        self.login()
        self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=False)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
data = res.get_data(as_text=True)
assert u'Sign and return your framework agreement' not in data
    def test_pending_success_message_is_equivocal_if_supplier_is_not_on_framework(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=False)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
data = res.get_data(as_text=True)
assert (
'Your application was successful. You\'ll be able to sell services when the G-Cloud 7 framework is live'
) not in data
assert 'Download your application award letter (.pdf)' not in data
assert 'This letter is a record of your successful G-Cloud 7 application.' not in data
assert 'You made your supplier declaration and submitted 1 service.' in data
assert 'Download your application result letter (.pdf)' in data
assert 'This letter informs you if your G-Cloud 7 application has been successful.' in data
def test_countersigned_framework_agreement_non_fav_framework(self, s3):
# "fav" being "frameworkAgreementVersion"
files = [
('', 'g-cloud-7-final-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-7-final-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-7-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-7/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='pathy/mc/path.face',
countersigned=True,
countersigned_path='g-cloud-7/agreements/1234/1234-countersigned-agreement.pdf',
)
res = self.client.get("/suppliers/frameworks/g-cloud-7")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-7/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-7/declaration",
None,
None,
),
)),
("Legal documents", (
(
'Download the standard framework agreement',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-final-framework-agreement.pdf',
None,
None,
),
(
"Download your signed framework agreement",
"/suppliers/frameworks/g-cloud-7/agreements/pathy/mc/path.face",
None,
None,
),
(
"Download your countersigned framework agreement",
"/suppliers/frameworks/g-cloud-7/agreements/countersigned-agreement.pdf",
None,
None,
),
(
'Download your application result letter',
'/suppliers/frameworks/g-cloud-7/agreements/result-letter.pdf',
None,
None,
),
(
'Download the call-off contract template',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-final-call-off.pdf',
None,
None,
),
)),
("Guidance", (
(
'Download the invitation to apply',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-invitation.pdf',
None,
None,
),
(
"Read about how to sell your services",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Communications", (
(
"View communications and clarification questions",
"/suppliers/frameworks/g-cloud-7/updates",
None,
None,
),
)),
('Reporting', (
(
'Download the reporting template',
'/suppliers/frameworks/g-cloud-7/files/g-cloud-7-reporting-template.xls',
None,
None,
),
)),
))
assert not doc.xpath(
"//main//table[normalize-space(string(./caption))=$b]",
b="Agreement details",
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="You can start selling your",
)
# neither of these should exist because it's a pre-frameworkAgreementVersion framework
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_countersigned_framework_agreement_fav_framework(self, s3):
# "fav" being "frameworkAgreementVersion"
files = [
('', 'g-cloud-8-final-call-off', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-8-invitation', 'pdf', '2016-05-01T14:00:00.000Z'),
('', 'g-cloud-8-final-framework-agreement', 'pdf', '2016-06-01T14:00:00.000Z'),
('', 'g-cloud-8-reporting-template', 'xls', '2016-06-06T14:00:00.000Z'),
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict(
'g-cloud-8/communications/{}'.format(section), filename, ext, last_modified=last_modified
) for section, filename, ext, last_modified in files
]
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='pathy/mc/path.face',
agreement_returned_at=self._boring_agreement_returned_at,
countersigned=True,
countersigned_path='g-cloud-8/agreements/1234/1234-countersigned-agreement.pdf',
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/suppliers/frameworks/g-cloud-7/agreements/result-letter.pdf",
label="Download your application result letter",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
("Legal documents", (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
"Download your \u2018original\u2019 framework agreement signature page",
"/suppliers/frameworks/g-cloud-8/agreements/pathy/mc/path.face",
None,
None,
),
(
"Download your \u2018counterpart\u2019 framework agreement signature page",
"/suppliers/frameworks/g-cloud-8/agreements/countersigned-agreement.pdf",
None,
None,
),
(
'Download the call-off contract template',
'/suppliers/frameworks/g-cloud-8/files/g-cloud-8-final-call-off.pdf',
None,
None,
),
)),
("Guidance", (
(
'Download the invitation to apply',
'/suppliers/frameworks/g-cloud-8/files/g-cloud-8-invitation.pdf',
None,
None,
),
(
"Read about how to sell your services",
"https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply",
None,
None,
),
)),
("Communications", (
(
"View communications and clarification questions",
"/suppliers/frameworks/g-cloud-8/updates",
None,
None,
),
)),
('Reporting', (
(
'Download the reporting template',
'/suppliers/frameworks/g-cloud-8/files/g-cloud-8-reporting-template.xls',
None,
None,
),
)),
))
assert not doc.xpath("//main//table[normalize-space(string(./caption))=$b]", b="Agreement details")
assert not doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages"
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service"
)
def test_shows_returned_agreement_details(self, s3):
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/suppliers/frameworks/g-cloud-8/agreements/result-letter.pdf",
label="Download your application result letter",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/framework-agreement.pdf',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert len(doc.xpath(
"//main//h1[normalize-space(string())=$b]",
b="Your G-Cloud 8 application",
)) == 1
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_countersigned_but_no_countersigned_path(self, s3):
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'A service', 'status': 'submitted', 'lotSlug': 'iaas'}
]
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at,
countersigned=True,
# note `countersigned_path` is not set: we're testing that the view behaves as though not countersigned
# i.e. is not depending on the `countersigned` property
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/framework-agreement.pdf',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert len(doc.xpath("//main//h1[normalize-space(string())=$b]", b="Your G-Cloud 8 application")) == 1
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_shows_contract_variation_link_after_agreement_returned(self, s3):
self.login()
g8_with_variation = get_g_cloud_8()
g8_with_variation['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
self.data_api_client.get_framework.return_value = g8_with_variation
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at,
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/framework-agreement.pdf',
None,
None,
),
(
'Read the proposed contract variation',
'/suppliers/frameworks/g-cloud-8/contract-variation/1',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_does_not_show_contract_variation_link_if_no_variation(self, s3):
self.login()
self.data_api_client.get_framework.return_value = get_g_cloud_8()
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_path='g-cloud-8/agreements/123-framework-agreement.pdf',
agreement_returned_at=self._boring_agreement_returned_at,
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[normalize-space(string())=$label]",
label="Read the proposed contract variation",
)
extracted_signing_details_table_rows = self._extract_signing_details_table_rows(doc)
assert extracted_signing_details_table_rows == \
self._boring_agreement_details_expected_table_results
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_does_not_show_contract_variation_link_if_agreement_not_returned(self, s3):
self.login()
g8_with_variation = get_g_cloud_8()
g8_with_variation['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
self.data_api_client.get_framework.return_value = g8_with_variation
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-7/agreement",
label="Sign and return your framework agreement",
)
assert not doc.xpath(
"//main//a[contains(@href, $href_part) or normalize-space(string())=$label]",
href_part="contract-variation/1",
label="Read the proposed contract variation",
)
assert not doc.xpath(
"//main//table[normalize-space(string(./caption))=$b]",
b="Agreement details",
)
assert not doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
def test_shows_contract_variation_alternate_link_text_after_agreed_by_ccs(self, s3):
self.login()
g8_with_variation = get_g_cloud_8()
g8_with_variation['frameworks']['variations'] = {
"1": {
"createdAt": "2018-08-16",
"countersignedAt": "2018-10-01",
"countersignerName": "A.N. Other",
"countersignerRole": "Head honcho",
},
}
self.data_api_client.get_framework.return_value = g8_with_variation
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True,
agreement_returned=True,
agreement_details=self._boring_agreement_details,
agreement_returned_at=self._boring_agreement_returned_at,
agreement_path='g-cloud-8/agreements/1234/1234-signed-agreement.pdf',
agreed_variations={
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
},
},
)
res = self.client.get("/suppliers/frameworks/g-cloud-8")
assert res.status_code == 200
data = res.get_data(as_text=True)
doc = html.fromstring(data)
assert not doc.xpath(
"//main//a[@href=$href or normalize-space(string())=$label]",
href="/frameworks/g-cloud-8/agreement",
label="Sign and return your framework agreement",
)
extracted_guidance_links = _extract_guidance_links(doc)
assert extracted_guidance_links == OrderedDict((
("You submitted:", (
(
'View submitted services',
'/suppliers/frameworks/g-cloud-8/submissions',
None,
None,
),
(
"View your declaration",
"/suppliers/frameworks/g-cloud-8/declaration",
None,
None,
),
)),
('Legal documents', (
(
'Read the standard framework agreement',
'https://www.gov.uk/government/publications/g-cloud-8-framework-agreement',
None,
None,
),
(
u'Download your \u2018original\u2019 framework agreement signature page',
'/suppliers/frameworks/g-cloud-8/agreements/signed-agreement.pdf',
None,
None,
),
(
'View the signed contract variation',
'/suppliers/frameworks/g-cloud-8/contract-variation/1',
None,
None,
),
)),
('Guidance', (
(
'Read about how to sell your services',
'https://www.gov.uk/guidance/g-cloud-suppliers-guide#how-to-apply',
None,
None,
),
)),
('Communications', (
(
'View communications and clarification questions',
'/suppliers/frameworks/g-cloud-8/updates',
None,
None,
),
)),
))
assert not doc.xpath(
"//main//a[normalize-space(string())=$label]",
label="Read the proposed contract variation",
)
assert doc.xpath("//main//p[contains(normalize-space(string()), $b)]", b="You can start selling your")
assert not doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your original and counterpart signature pages",
)
assert doc.xpath(
"//main//p[contains(normalize-space(string()), $b)]",
b="Your framework agreement signature page has been sent to the Crown Commercial Service",
)
@pytest.mark.parametrize(
'supplier_framework_kwargs,link_href',
(
({'declaration': None}, '/suppliers/frameworks/g-cloud-7/declaration/start'),
({}, '/suppliers/frameworks/g-cloud-7/declaration')
)
)
def test_make_supplier_declaration_links_to_correct_page(
self, s3, supplier_framework_kwargs, link_href
):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
application_company_details_confirmed=True,
**supplier_framework_kwargs,
)
response = self.client.get('/suppliers/frameworks/g-cloud-7')
document = html.fromstring(response.get_data(as_text=True))
assert (
document.xpath(
"//a[contains(normalize-space(string()), $link_label)]/@href",
link_label="Make your supplier declaration"
)[0]
) == link_href
@mock.patch('dmutils.s3.S3')
class TestFrameworkAgreementDocumentDownload(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_download_document_fails_if_no_supplier_framework(self, S3):
self.data_api_client.get_supplier_framework_info.side_effect = APIError(mock.Mock(status_code=404))
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 404
def test_download_document_fails_if_no_supplier_declaration(self, S3):
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(declaration=None)
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 404
def test_download_document(self, S3):
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = 'http://url/path?param=value'
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 302
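        # the redirect should point at the app's configured asset host rather than the host in the raw signed URL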
assert res.location == 'http://asset-host/path?param=value'
uploader.get_signed_url.assert_called_with('g-cloud-7/agreements/1234/1234-example.pdf')
def test_download_document_with_asset_url(self, S3):
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = 'http://url/path?param=value'
self.app.config['DM_ASSETS_URL'] = 'https://example'
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/agreements/example.pdf')
assert res.status_code == 302
assert res.location == 'https://example/path?param=value'
uploader.get_signed_url.assert_called_with('g-cloud-7/agreements/1234/1234-example.pdf')
@mock.patch('dmutils.s3.S3')
class TestFrameworkDocumentDownload(BaseApplicationTest):
def test_download_document(self, S3):
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = 'http://url/path?param=value'
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/files/example.pdf')
assert res.status_code == 302
assert res.location == 'http://asset-host/path?param=value'
uploader.get_signed_url.assert_called_with('g-cloud-7/communications/example.pdf')
def test_download_document_returns_404_if_url_is_None(self, S3):
uploader = mock.Mock()
S3.return_value = uploader
uploader.get_signed_url.return_value = None
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/files/example.pdf')
assert res.status_code == 404
@mock.patch('dmutils.s3.S3')
class TestDownloadDeclarationDocument(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.login()
self.data_api_client_patch = mock.patch('app.main.views.services.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_document_url(self, s3):
s3.return_value.get_signed_url.return_value = 'http://example.com/modern-slavery-statement.pdf'
res = self.client.get(
'/suppliers/assets/g-cloud-11/documents/1234/modern-slavery-statement.pdf'
)
assert res.status_code == 302
assert res.headers['Location'] == 'http://asset-host/modern-slavery-statement.pdf'
def test_missing_document_url(self, s3):
s3.return_value.get_signed_url.return_value = None
res = self.client.get(
'/suppliers/frameworks/g-cloud-11/documents/1234/modern-slavery-statement.pdf'
)
assert res.status_code == 404
def test_document_url_not_matching_user_supplier(self, s3):
res = self.client.get(
'/suppliers/frameworks/g-cloud-11/documents/999/modern-slavery-statement.pdf'
)
assert res.status_code == 404
class TestStartSupplierDeclaration(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_start_declaration_goes_to_declaration_overview_page(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
response = self.client.get('/suppliers/frameworks/g-cloud-7/declaration/start')
document = html.fromstring(response.get_data(as_text=True))
assert (
document.xpath("//a[normalize-space(string(.))='Start your declaration']/@href")[0]
== '/suppliers/frameworks/g-cloud-7/declaration/reuse'
)
assert document.xpath(
"//p[contains(normalize-space(string()), $t)]",
t="change your answers before the application deadline at "
"5pm\u00a0BST,\u00a0Tuesday\u00a06\u00a0October\u00a02015.",
)
@pytest.mark.parametrize('method', ('get', 'post'))
class TestDeclarationOverviewSubmit(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
"""Behaviour common to both GET and POST views on path /suppliers/frameworks/g-cloud-7/declaration."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_supplier_not_interested(self, method):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(status="open"), "g-cloud-7"
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_raise(
APIError(mock.Mock(status_code=404)),
1234,
"g-cloud-7",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = getattr(self.client, method)("/suppliers/frameworks/g-cloud-7/declaration")
assert response.status_code == 404
def test_framework_coming(self, method):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(status="coming"),
"g-cloud-7",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(framework_slug="g-cloud-7"),
1234,
"g-cloud-7",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = getattr(self.client, method)("/suppliers/frameworks/g-cloud-7/declaration")
assert response.status_code == 404
def test_framework_unknown(self, method):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_raise(
APIError(mock.Mock(status_code=404)),
"muttoning-clouds",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_raise(
APIError(mock.Mock(status_code=404)),
1234,
"muttoning-clouds",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = getattr(self.client, method)("/suppliers/frameworks/muttoning-clouds/declaration")
assert response.status_code == 404
class TestDeclarationOverview(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@staticmethod
def _extract_section_information(doc, section_title, expect_edit_link=True):
"""
given a section (full text) name, returns that section's relevant information in a tuple (format described
in comments)
"""
tables = doc.xpath(
"//table[preceding::h2[1][normalize-space(string())=$section_title]]",
section_title=section_title,
)
assert len(tables) == 1
table = tables[0]
edit_as = doc.xpath(
"//a[@class='summary-change-link'][preceding::h2[1][normalize-space(string())=$section_title]]",
section_title=section_title,
)
assert ([a.xpath("normalize-space(string())") for a in edit_as] == ["Edit"]) is expect_edit_link
return (
# table caption text
table.xpath("normalize-space(string(./caption))"),
# "Edit" link href
edit_as[0].xpath("@href")[0] if expect_edit_link else None,
tuple(
(
# contents of row heading
row.xpath("normalize-space(string(./td[@class='summary-item-field-first']))"),
# full text contents of row "value"
row.xpath("normalize-space(string(./td[@class='summary-item-field']))"),
# full text contents of each a element in row value
tuple(a.xpath("normalize-space(string())") for a in row.xpath(
"./td[@class='summary-item-field']//a"
)),
# href of each a element in row value
tuple(row.xpath("./td[@class='summary-item-field']//a/@href")),
# full text contents of each li element in row value
tuple(li.xpath("normalize-space(string())") for li in row.xpath(
"./td[@class='summary-item-field']//li"
)),
) for row in table.xpath(".//tr[contains(@class,'summary-item-row')]")
)
)
@staticmethod
def _section_information_strip_edit_href(section_information):
row_heading, edit_href, rows = section_information
return row_heading, None, rows
def _setup_data_api_client(self, framework_status, framework_slug, declaration, prefill_fw_slug):
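        # wire up get_framework and get_supplier_framework_info with side effects that also assert their call
        # arguments; set_supplier_declaration must never be called by the declaration overview views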
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(slug=framework_slug, name="F-Cumulus 0", status=framework_status),
framework_slug,
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(
framework_slug=framework_slug,
declaration=declaration,
prefill_declaration_from_framework_slug=prefill_fw_slug,
),
1234,
framework_slug,
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
# corresponds to the parametrization args:
# "framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections"
_common_parametrization = tuple(
chain.from_iterable(chain(
(( # noqa
"g-cloud-9",
empty_declaration,
False,
prefill_fw_slug,
(
( # expected result for "Providing suitable services" section as returned by
# _extract_section_information
"Providing suitable services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",
(
(
"Services are cloud-related",
"Answer question",
("Answer question",),
("/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",),
(),
),
(
"Services in scope for G-Cloud",
"Answer question",
("Answer question",),
("/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#servicesDoNotInclude",),
(),
),
(
"Buyers pay for what they use",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services"
"#payForWhatUse",
),
(),
),
(
"What your team will deliver",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#offerServicesYourselves",
),
(),
),
(
"Contractual responsibility and accountability",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#fullAccountability",
),
(),
),
),
),
( # expected result for "Grounds for mandatory exclusion" section as returned by
# _extract_section_information
"Grounds for mandatory exclusion",
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",
(
(
"Organised crime or conspiracy convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
("/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",),
(),
),
(
"Bribery or corruption convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#corruptionBribery",
),
(),
),
(
"Fraud convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#fraudAndTheft",
),
(),
),
(
"Terrorism convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#terrorism",
),
(),
),
(
"Organised crime convictions",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-"
"exclusion#organisedCrime",
),
(),
),
),
),
( # expected result for "How you’ll deliver your services" section as returned by
# _extract_section_information
"How you’ll deliver your services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-services",
(
(
"Subcontractors or consortia",
q_link_text_prefillable_section,
(q_link_text_prefillable_section,),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-"
"services",
),
(),
),
),
),
),
        ) for empty_declaration in (None, {})),  # two possible ways of specifying an "empty" declaration - test both
(( # noqa
"g-cloud-9",
{
"status": "started",
"conspiracy": True,
"corruptionBribery": False,
"fraudAndTheft": True,
"terrorism": False,
"organisedCrime": True,
"subcontracting": [
"yourself without the use of third parties (subcontractors)",
"as a prime contractor, using third parties (subcontractors) to provide all services",
],
},
False,
prefill_fw_slug,
(
( # expected result for "Providing suitable services" section as returned by
# _extract_section_information
"Providing suitable services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",
(
(
"Services are cloud-related",
"Answer question",
("Answer question",),
("/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",),
(),
),
(
"Services in scope for G-Cloud",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#servicesDoNotInclude",
),
(),
),
(
"Buyers pay for what they use",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#payForWhatUse",
),
(),
),
(
"What your team will deliver",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#offerServicesYourselves",
),
(),
),
(
"Contractual responsibility and accountability",
"Answer question",
("Answer question",),
(
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-"
"services#fullAccountability",
),
(),
),
),
),
( # expected result for "Grounds for mandatory exclusion" section as returned by
# _extract_section_information
"Grounds for mandatory exclusion",
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",
(
(
"Organised crime or conspiracy convictions",
"Yes",
(),
(),
(),
),
(
"Bribery or corruption convictions",
"No",
(),
(),
(),
),
(
"Fraud convictions",
"Yes",
(),
(),
(),
),
(
"Terrorism convictions",
"No",
(),
(),
(),
),
(
"Organised crime convictions",
"Yes",
(),
(),
(),
),
),
),
( # expected result for "How you’ll deliver your services" section as returned by
# _extract_section_information
"How you’ll deliver your services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-services",
(
(
"Subcontractors or consortia",
(
"yourself without the use of third parties (subcontractors) as a prime contractor, "
"using third parties (subcontractors) to provide all services"
),
(),
(),
(
"yourself without the use of third parties (subcontractors)",
"as a prime contractor, using third parties (subcontractors) to provide all services",
),
),
),
),
),
),),
(( # noqa
"g-cloud-9",
dict(status=declaration_status, **(valid_g9_declaration_base())),
True,
prefill_fw_slug,
(
( # expected result for "Providing suitable services" section as returned by
# _extract_section_information
"Providing suitable services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/providing-suitable-services",
(
(
"Services are cloud-related",
"Yes",
(),
(),
(),
),
(
"Services in scope for G-Cloud",
"Yes",
(),
(),
(),
),
(
"Buyers pay for what they use",
"Yes",
(),
(),
(),
),
(
"What your team will deliver",
"No",
(),
(),
(),
),
(
"Contractual responsibility and accountability",
"Yes",
(),
(),
(),
),
),
),
( # expected result for "Grounds for mandatory exclusion" section as returned by
# _extract_section_information
"Grounds for mandatory exclusion",
"/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion",
(
(
"Organised crime or conspiracy convictions",
"No",
(),
(),
(),
),
(
"Bribery or corruption convictions",
"Yes",
(),
(),
(),
),
(
"Fraud convictions",
"No",
(),
(),
(),
),
(
"Terrorism convictions",
"Yes",
(),
(),
(),
),
(
"Organised crime convictions",
"No",
(),
(),
(),
),
),
),
( # expected result for "How you’ll deliver your services" section as returned by
# _extract_section_information
"How you’ll deliver your services",
"/suppliers/frameworks/g-cloud-9/declaration/edit/how-youll-deliver-your-services",
(
(
"Subcontractors or consortia",
"yourself without the use of third parties (subcontractors)",
(),
(),
(),
),
),
),
),
) for declaration_status in ("started", "complete",)),
) for prefill_fw_slug, q_link_text_prefillable_section in (
# test all of the previous combinations with two possible values of prefill_fw_slug
(None, "Answer question",),
("some-previous-framework", "Review answer",),
)))
    # This is more straightforward than _common_parametrization because we only have to care about non-open
    # frameworks. G7 doesn't (yet?) have any "short names" for questions, so the overview lists answers against
    # their full verbose question texts; asserting the content of any G7 section would therefore require keeping
    # a reference copy of all those question texts here. We don't want to do that, so for now we don't assert
    # any G7 sections.
_g7_parametrization = (
("g-cloud-7", dict(FULL_G7_SUBMISSION, status="started"), True, None, ()),
("g-cloud-7", dict(FULL_G7_SUBMISSION, status="complete"), True, None, ()),
("g-cloud-7", None, False, None, ()),
("g-cloud-7", {}, False, None, ()),
)
@pytest.mark.parametrize(
"framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections",
_g7_parametrization
)
def test_display_open(self, framework_slug, declaration, decl_valid, prefill_fw_slug, expected_sections):
self._setup_data_api_client("open", framework_slug, declaration, prefill_fw_slug)
self.login()
response = self.client.get("/suppliers/frameworks/{}/declaration".format(framework_slug))
assert response.status_code == 200
doc = html.fromstring(response.get_data(as_text=True))
breadcrumbs = doc.xpath("//div[@class='govuk-breadcrumbs']/ol/li")
assert tuple(li.xpath("normalize-space(string())") for li in breadcrumbs) == (
"Digital Marketplace",
"Your account",
"Apply to F-Cumulus 0",
"Your declaration overview",
)
assert tuple(li.xpath(".//a/@href") for li in breadcrumbs) == (
['/'],
['/suppliers'],
[f'/suppliers/frameworks/{framework_slug}'],
[],
)
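        # the messaging differs depending on whether the declaration is still incomplete, complete but not yet
        # made, or made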
assert bool(doc.xpath(
"//p[contains(normalize-space(string()), $t)][contains(normalize-space(string()), $f)]",
t="You must answer all questions and make your declaration before",
f="F-Cumulus 0",
)) is not decl_valid
assert bool(doc.xpath(
"//p[contains(normalize-space(string()), $t)][contains(normalize-space(string()), $f)]",
t="You must make your declaration before",
f="F-Cumulus 0",
)) is (decl_valid and declaration.get("status") != "complete")
assert len(doc.xpath(
"//p[contains(normalize-space(string()), $t)]",
t="You can come back and edit your answers at any time before the deadline.",
)) == (2 if decl_valid and declaration.get("status") != "complete" else 0)
assert len(doc.xpath(
"//p[contains(normalize-space(string()), $t)][not(contains(normalize-space(string()), $d))]",
t="You can come back and edit your answers at any time",
d="deadline",
)) == (2 if decl_valid and declaration.get("status") == "complete" else 0)
if prefill_fw_slug is None:
assert not doc.xpath("//a[normalize-space(string())=$t]", t="Review answer")
assert bool(doc.xpath(
"//a[normalize-space(string())=$a or normalize-space(string())=$b]",
a="Answer question",
b="Review answer",
)) is not decl_valid
if not decl_valid:
# assert that all links with the label "Answer question" or "Review answer" link to some subpage (by
# asserting that there are none that don't, having previously determined that such-labelled links exist)
assert not doc.xpath(
# we want the href to *contain* $u but not *be* $u
"//a[normalize-space(string())=$a or normalize-space(string())=$b]"
"[not(starts-with(@href, $u)) or @href=$u]",
a="Answer question",
b="Review answer",
u="/suppliers/frameworks/{}/declaration/".format(framework_slug),
)
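        # when all answers are valid but the declaration hasn't been made yet, both "Make declaration" forms
        # should POST back to this same declaration overview URL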
if decl_valid and declaration.get("status") != "complete":
mdf_actions = doc.xpath(
"//form[@method='POST'][.//button[normalize-space(string())=$t]]"
"[.//input[@name='csrf_token']]/@action",
t="Make declaration",
)
assert len(mdf_actions) == 2
assert all(
urljoin("/suppliers/frameworks/{}/declaration".format(framework_slug), action) ==
"/suppliers/frameworks/{}/declaration".format(framework_slug)
for action in mdf_actions
)
else:
assert not doc.xpath("//button[normalize-space(string())=$t]", t="Make declaration")
assert doc.xpath(
"//a[normalize-space(string())=$t][@href=$u]",
t="Return to application",
u="/suppliers/frameworks/{}".format(framework_slug),
)
for expected_section in expected_sections:
assert self._extract_section_information(doc, expected_section[0]) == expected_section
@pytest.mark.parametrize(
"framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections",
tuple(
(
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
)
for framework_slug, declaration, decl_valid, prefill_fw_slug, expected_sections
in chain(_common_parametrization, _g7_parametrization)
if (declaration or {}).get("status") == "complete"
)
)
@pytest.mark.parametrize("framework_status", ("pending", "standstill", "live", "expired",))
def test_display_closed(
self,
framework_status,
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
):
self._setup_data_api_client(framework_status, framework_slug, declaration, prefill_fw_slug)
self.login()
response = self.client.get("/suppliers/frameworks/{}/declaration".format(framework_slug))
assert response.status_code == 200
doc = html.fromstring(response.get_data(as_text=True))
breadcrumbs = doc.xpath("//div[@class='govuk-breadcrumbs']/ol/li")
assert tuple(li.xpath("normalize-space(string())") for li in breadcrumbs) == (
"Digital Marketplace",
"Your account",
"Your F-Cumulus 0 application",
"Your declaration overview",
)
assert tuple(li.xpath(".//a/@href") for li in breadcrumbs) == (
['/'],
['/suppliers'],
[f'/suppliers/frameworks/{framework_slug}'],
[],
)
# there shouldn't be any links to the "edit" page
assert not any(
urljoin("/suppliers/frameworks/{}/declaration".format(framework_slug), a.attrib["href"]).startswith(
"/suppliers/frameworks/{}/declaration/edit/".format(framework_slug)
)
for a in doc.xpath("//a[@href]")
)
# no submittable forms should be pointing at ourselves
assert not any(
urljoin(
"/suppliers/frameworks/{}/declaration".format(framework_slug),
form.attrib["action"],
) == "/suppliers/frameworks/{}/declaration".format(framework_slug)
for form in doc.xpath("//form[.//input[@type='submit'] or .//button]")
)
assert not doc.xpath("//a[@href][normalize-space(string())=$label]", label="Answer question")
assert not doc.xpath("//a[@href][normalize-space(string())=$label]", label="Review answer")
assert not doc.xpath("//p[contains(normalize-space(string()), $t)]", t="make your declaration")
assert not doc.xpath("//p[contains(normalize-space(string()), $t)]", t="edit your answers")
for expected_section in expected_sections:
assert self._extract_section_information(
doc,
expected_section[0],
expect_edit_link=False,
) == self._section_information_strip_edit_href(expected_section)
@pytest.mark.parametrize(
"framework_slug,declaration,decl_valid,prefill_fw_slug,expected_sections",
tuple(
(
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
)
for framework_slug, declaration, decl_valid, prefill_fw_slug, expected_sections
in chain(_common_parametrization, _g7_parametrization)
if (declaration or {}).get("status") != "complete"
)
)
@pytest.mark.parametrize("framework_status", ("pending", "standstill", "live", "expired",))
def test_error_closed(
self,
framework_status,
framework_slug,
declaration,
decl_valid,
prefill_fw_slug,
expected_sections,
):
self._setup_data_api_client(framework_status, framework_slug, declaration, prefill_fw_slug)
self.login()
response = self.client.get("/suppliers/frameworks/{}/declaration".format(framework_slug))
assert response.status_code == 410
@pytest.mark.parametrize("framework_status", ("coming", "open", "pending", "standstill", "live", "expired",))
def test_error_nonexistent_framework(self, framework_status):
self._setup_data_api_client(framework_status, "g-cloud-31415", {"status": "complete"}, None)
self.login()
response = self.client.get("/suppliers/frameworks/g-cloud-31415/declaration")
assert response.status_code == 404
class TestDeclarationSubmit(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize("prefill_fw_slug", (None, "some-previous-framework",))
@pytest.mark.parametrize("invalid_declaration", (
None,
{},
{
# not actually complete - only first section is
"status": "complete",
"unfairCompetition": False,
"skillsAndResources": False,
"offerServicesYourselves": False,
"fullAccountability": True,
},
))
def test_invalid_declaration(self, invalid_declaration, prefill_fw_slug):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(slug="g-cloud-9", name="G-Cloud 9", status="open"),
"g-cloud-9",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(
framework_slug="g-cloud-9",
declaration=invalid_declaration,
prefill_declaration_from_framework_slug=prefill_fw_slug, # should have zero effect
),
1234,
"g-cloud-9",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = self.client.post("/suppliers/frameworks/g-cloud-9/declaration")
assert response.status_code == 400
@pytest.mark.parametrize("prefill_fw_slug", (None, "some-previous-framework",))
@pytest.mark.parametrize("declaration_status", ("started", "complete",))
@mock.patch("dmutils.s3.S3") # needed by the framework dashboard which our request gets redirected to
def test_valid_declaration(self, s3, prefill_fw_slug, declaration_status):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(slug="g-cloud-9", name="G-Cloud 9", status="open"),
"g-cloud-9",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(
framework_slug="g-cloud-9",
declaration=dict(status=declaration_status, **(valid_g9_declaration_base())),
prefill_declaration_from_framework_slug=prefill_fw_slug, # should have zero effect
),
1234,
"g-cloud-9",
)
self.data_api_client.set_supplier_declaration.side_effect = assert_args_and_return(
dict(status="complete", **(valid_g9_declaration_base())),
1234,
"g-cloud-9",
dict(status="complete", **(valid_g9_declaration_base())),
"email@email.com",
)
response = self.client.post("/suppliers/frameworks/g-cloud-9/declaration", follow_redirects=True)
# args of call are asserted by mock's side_effect
assert self.data_api_client.set_supplier_declaration.called is True
# this will be the response from the redirected-to view
assert response.status_code == 200
@pytest.mark.parametrize("framework_status", ("standstill", "pending", "live", "expired",))
def test_closed_framework_state(self, framework_status):
self.login()
self.data_api_client.get_framework.side_effect = assert_args_and_return(
self.framework(status=framework_status),
"g-cloud-7",
)
self.data_api_client.get_supplier_framework_info.side_effect = assert_args_and_return(
self.supplier_framework(framework_slug="g-cloud-7"),
1234,
"g-cloud-7",
)
self.data_api_client.set_supplier_declaration.side_effect = AssertionError("This shouldn't be called")
response = self.client.post("/suppliers/frameworks/g-cloud-7/declaration")
assert response.status_code == 404
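# Tests for viewing and editing individual declaration sections, including pre-filling answers
# from a supplier's declaration on an earlier framework and uploading supporting documents.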
class TestSupplierDeclaration(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize("empty_declaration", ({}, None,))
def test_get_with_no_previous_answers(self, empty_declaration):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration=empty_declaration,
)
self.data_api_client.get_supplier_declaration.side_effect = APIError(mock.Mock(status_code=404))
res = self.client.get('/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials')
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath('//input[@id="PR-1-yes"]/@checked') == []
assert doc.xpath('//input[@id="PR-1-no"]/@checked') == []
    def test_get_with_previous_answers(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration={"status": "started", "PR1": False}
)
res = self.client.get('/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials')
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath('//input[@id="input-PR1-2"]/@checked')) == 1
    def test_get_with_prefilled_answers(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={"status": "started"},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"conspiracy": True,
"corruptionBribery": False,
"fraudAndTheft": True,
"terrorism": False,
"organisedCrime": False,
}
)["frameworkInterest"]["declaration"]
}
# The grounds-for-mandatory-exclusion section has "prefill: True" in the declaration manifest
res = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion'
)
assert res.status_code == 200
self.data_api_client.get_supplier_declaration.assert_called_once_with(
1234, "digital-outcomes-and-specialists-2"
)
doc = html.fromstring(res.get_data(as_text=True))
# Radio buttons have been pre-filled with the correct answers
assert len(doc.xpath('//input[@id="input-conspiracy-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-fraudAndTheft-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-organisedCrime-2"][@value="False"]/@checked')) == 1
# Blue banner message is shown at top of page
assert doc.xpath('normalize-space(string(//div[@class="banner-information-without-action"]))') == \
"Answers on this page are from an earlier declaration and need review."
# Blue information messages are shown next to each question
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 5
for message in info_messages:
assert self.strip_all_whitespace(message.text) == self.strip_all_whitespace(
"This answer is from your Digital Stuff 2 declaration"
)
    def test_get_with_partially_prefilled_answers(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={"status": "started"},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from - missing "corruptionBribery" and "terrorism" keys
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"conspiracy": True,
"fraudAndTheft": True,
"organisedCrime": False
}
)["frameworkInterest"]["declaration"]
}
# The grounds-for-mandatory-exclusion section has "prefill: True" in the declaration manifest
res = self.client.get('/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion')
assert res.status_code == 200
self.data_api_client.get_supplier_declaration.assert_called_once_with(
1234, "digital-outcomes-and-specialists-2"
)
doc = html.fromstring(res.get_data(as_text=True))
# Radio buttons have been pre-filled with the correct answers
assert len(doc.xpath('//input[@id="input-conspiracy-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-fraudAndTheft-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-organisedCrime-2"][@value="False"]/@checked')) == 1
# Radio buttons for missing keys exist but have not been pre-filled
assert len(doc.xpath('//input[@id="input-corruptionBribery-1"]')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-2"]')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-corruptionBribery-2"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-terrorism-1"]')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-2"]')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-terrorism-2"]/@checked')) == 0
# Blue banner message is shown at top of page
assert doc.xpath('normalize-space(string(//div[@class="banner-information-without-action"]))') == \
"Answers on this page are from an earlier declaration and need review."
# Blue information messages are shown next to pre-filled questions only
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 3
for message in info_messages:
assert self.strip_all_whitespace(message.text) == self.strip_all_whitespace(
"This answer is from your Digital Stuff 2 declaration"
)
def test_answers_not_prefilled_if_section_has_already_been_saved(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information with the grounds-for-mandatory-exclusion section complete
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={
"status": "started",
"conspiracy": False,
"corruptionBribery": True,
"fraudAndTheft": False,
"terrorism": True,
"organisedCrime": False
},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from - has relevant answers but should not ever be called
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"conspiracy": True,
"corruptionBribery": False,
"fraudAndTheft": True,
"terrorism": False,
"organisedCrime": False
}
)["frameworkInterest"]["declaration"]
}
# The grounds-for-mandatory-exclusion section has "prefill: True" in the declaration manifest
res = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/edit/grounds-for-mandatory-exclusion'
)
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
        # Previous framework and declaration have not been fetched.
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9')
]
assert self.data_api_client.get_supplier_declaration.called is False
# Radio buttons have been filled with the current answers; not those from previous declaration
assert len(doc.xpath('//input[@id="input-conspiracy-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-corruptionBribery-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-fraudAndTheft-2"][@value="False"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-terrorism-1"][@value="True"]/@checked')) == 1
assert len(doc.xpath('//input[@id="input-organisedCrime-2"][@value="False"]/@checked')) == 1
# No blue banner message is shown at top of page
assert len(doc.xpath('//div[@class="banner-information-without-action"]')) == 0
# No blue information messages are shown next to each question
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 0
def test_answers_not_prefilled_if_section_marked_as_prefill_false(self):
self.login()
# Handle calls for both the current framework and for the framework to pre-fill from
self.data_api_client.get_framework.side_effect = lambda framework_slug: {
"g-cloud-9": self.framework(slug='g-cloud-9', name='G-Cloud 9', status='open'),
"digital-outcomes-and-specialists-2": self.framework(
slug='digital-outcomes-and-specialists-2',
name='Digital Stuff 2', status='live'
)
}[framework_slug]
# Current framework application information
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={"status": "started"},
prefill_declaration_from_framework_slug="digital-outcomes-and-specialists-2"
)
# The previous declaration to prefill from - has relevant answers but should not ever be called
self.data_api_client.get_supplier_declaration.return_value = {
'declaration': self.supplier_framework(
framework_slug="digital-outcomes-and-specialists-2",
declaration={
"status": "complete",
"readUnderstoodGuidance": True,
"understandTool": True,
"understandHowToAskQuestions": False
}
)["frameworkInterest"]["declaration"]
}
# The how-you-apply section has "prefill: False" in the declaration manifest
res = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/edit/how-you-apply'
)
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
# Previous framework and declaration have not been fetched
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9'),
]
assert self.data_api_client.get_supplier_declaration.called is False
# Radio buttons exist on page but have not been populated at all
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-1"]')) == 1
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-2"]')) == 1
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-readUnderstoodGuidance-2"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandTool-1"]')) == 1
assert len(doc.xpath('//input[@id="input-understandTool-2"]')) == 1
assert len(doc.xpath('//input[@id="input-understandTool-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandTool-2"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-1"]')) == 1
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-2"]')) == 1
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-1"]/@checked')) == 0
assert len(doc.xpath('//input[@id="input-understandHowToAskQuestions-2"]/@checked')) == 0
# No blue banner message is shown at top of page
assert len(doc.xpath('//div[@class="banner-information-without-action"]')) == 0
# No blue information messages are shown next to each question
info_messages = doc.xpath('//div[@class="message-wrapper"]//span[@class="message-content"]')
assert len(info_messages) == 0
def test_post_valid_data(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration={"status": "started"}
)
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 302
assert self.data_api_client.set_supplier_declaration.called is True
@mock.patch('dmutils.s3.S3')
def test_post_valid_data_with_document_upload(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started"}
)
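        # freeze time so the uploaded statement is saved under a deterministic, timestamped filename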
with freeze_time('2017-11-12 13:14:15'):
res = self.client.post(
'/suppliers/frameworks/g-cloud-11/declaration/edit/modern-slavery',
data={
'modernSlaveryTurnover': False,
'modernSlaveryReportingRequirements': None,
'mitigatingFactors3': None,
'modernSlaveryStatement': None,
'modernSlaveryStatementOptional': (BytesIO(valid_pdf_bytes), 'document.pdf')
}
)
assert res.status_code == 302
assert self.data_api_client.set_supplier_declaration.call_args_list == [
mock.call(
1234,
"g-cloud-11",
{
'status': 'started',
'modernSlaveryTurnover': False,
'modernSlaveryReportingRequirements': None,
'mitigatingFactors3': None,
'modernSlaveryStatement': None,
'modernSlaveryStatementOptional': 'http://localhost/suppliers/assets/g-cloud-11/documents/1234/modern-slavery-statement-2017-11-12-1314.pdf' # noqa
},
"email@email.com"
)
]
s3.return_value.save.assert_called_once_with(
'g-cloud-11/documents/1234/modern-slavery-statement-2017-11-12-1314.pdf',
mock.ANY, acl='public-read'
)
def test_post_valid_data_to_complete_declaration(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration=FULL_G7_SUBMISSION
)
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/grounds-for-discretionary-exclusion',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 302
assert res.location == 'http://localhost/suppliers/frameworks/g-cloud-7/declaration'
assert self.data_api_client.set_supplier_declaration.called is True
assert self.data_api_client.set_supplier_declaration.call_args[0][2]['status'] == 'complete'
def test_post_valid_data_with_api_failure(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-7",
declaration={"status": "started"}
)
self.data_api_client.set_supplier_declaration.side_effect = APIError(mock.Mock(status_code=400))
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 400
@mock.patch('app.main.helpers.validation.G7Validator.get_error_messages_for_page')
def test_post_with_validation_errors(self, get_error_messages_for_page):
"""Test that answers are not saved if there are errors
For unit tests of the validation see :mod:`tests.app.main.helpers.test_frameworks`
"""
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
get_error_messages_for_page.return_value = {'PR1': {'input_name': 'PR1', 'message': 'this is invalid'}}
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 400
assert self.data_api_client.set_supplier_declaration.called is False
doc = html.fromstring(res.get_data(as_text=True))
elems = doc.cssselect('#input-PR1-1')
assert elems[0].value == 'True'
def test_post_invalidating_previously_valid_page(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug='g-cloud-9', status='open')
mock_supplier_framework = self.supplier_framework(
framework_slug="g-cloud-9",
declaration={
"status": "started",
"establishedInTheUK": False,
"appropriateTradeRegisters": True,
"appropriateTradeRegistersNumber": "242#353",
"licenceOrMemberRequired": "licensed",
"licenceOrMemberRequiredDetails": "Foo Bar"
}
)
self.data_api_client.get_supplier_framework_info.return_value = mock_supplier_framework
self.data_api_client.get_supplier_declaration.return_value = {
"declaration": mock_supplier_framework["frameworkInterest"]["declaration"]
}
res = self.client.post(
'/suppliers/frameworks/g-cloud-9/declaration/edit/established-outside-the-uk',
data={
"establishedInTheUK": "False",
"appropriateTradeRegisters": "True",
"appropriateTradeRegistersNumber": "242#353",
"licenceOrMemberRequired": "licensed",
# deliberately missing:
"licenceOrMemberRequiredDetails": "",
},
)
assert res.status_code == 400
assert self.data_api_client.set_supplier_declaration.called is False
def test_cannot_post_data_if_not_open(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_declaration.return_value = {
"declaration": {"status": "started"}
}
res = self.client.post(
'/suppliers/frameworks/g-cloud-7/declaration/edit/g-cloud-7-essentials',
data=FULL_G7_SUBMISSION
)
assert res.status_code == 404
assert self.data_api_client.set_supplier_declaration.called is False
@mock.patch('dmutils.s3.S3')
def test_post_declaration_answer_with_document_upload_errors(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started"}
)
with freeze_time('2017-11-12 13:14:15'):
res = self.client.post(
'/suppliers/frameworks/g-cloud-11/declaration/edit/modern-slavery',
data={
'modernSlaveryTurnover': False,
'modernSlaveryReportingRequirements': None,
'mitigatingFactors3': None,
'modernSlaveryStatement': None,
'modernSlaveryStatementOptional': (BytesIO(b"doc"), 'document.doc')
}
)
assert res.status_code == 400
doc = html.fromstring(res.get_data(as_text=True))
assert len(doc.xpath(
"//*[contains(@class,'validation-message')][contains(normalize-space(string()), $text)]",
text="Your document is not in an open format.",
)) == 1
assert self.data_api_client.set_supplier_declaration.called is False
assert s3.return_value.save.called is False
@mock.patch('dmutils.s3.S3')
def test_post_declaration_answer_with_existing_document(self, s3):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started", "modernSlaveryStatement": "path/to/existing/upload"}
)
with freeze_time('2017-11-12 13:14:15'):
res = self.client.post(
'/suppliers/frameworks/g-cloud-11/declaration/edit/modern-slavery',
data={
'modernSlaveryTurnover': True,
'modernSlaveryReportingRequirements': True,
'mitigatingFactors3': None,
}
)
assert res.status_code == 302
assert self.data_api_client.set_supplier_declaration.called
assert s3.return_value.save.called is False
def test_has_session_timeout_warning(self):
self.data_api_client.get_framework.return_value = self.framework(status='open', slug="g-cloud-11")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug="g-cloud-11",
declaration={"status": "started"}
)
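        # freeze time at 13:14 GMT; assuming the standard one-hour session lifetime, the warning
        # should tell the user their session expires at 2:14pm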
with freeze_time("2019-11-12 13:14:15"):
self.login() # need to login after freezing time
doc = html.fromstring(
self.client.get(f"/suppliers/frameworks/g-cloud-11/declaration/edit/contact-details").data
)
assert "2:14pm GMT" in doc.xpath("string(.//div[@id='session-timeout-warning'])")
@mock.patch('dmutils.s3.S3')
class TestFrameworkUpdatesPage(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def _assert_page_title_and_table_headings(self, doc, check_for_tables=True):
assert self.strip_all_whitespace('G-Cloud 7 updates') in self.strip_all_whitespace(doc.xpath('//h1')[0].text)
headers = doc.xpath('//div[@class="govuk-grid-column-full"]//h2 | //table//caption//span')
assert len(headers) == 2
assert self.strip_all_whitespace(headers[0].text) == 'Communications'
assert self.strip_all_whitespace(headers[1].text) == 'Clarificationquestionsandanswers'
if check_for_tables:
table_captions = doc.xpath('//div/table/caption/span')
assert len(table_captions) == 2
assert self.strip_all_whitespace(table_captions[0].text) == 'Communications'
assert self.strip_all_whitespace(table_captions[1].text) == 'Clarificationquestionsandanswers'
def test_should_be_a_503_if_connecting_to_amazon_fails(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
# if s3 throws a 500-level error
s3.side_effect = S3ResponseError(
{'Error': {'Code': 500, 'Message': 'Amazon has collapsed. The internet is over.'}},
'test_should_be_a_503_if_connecting_to_amazon_fails'
)
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
assert response.status_code == 503
doc = html.fromstring(response.get_data(as_text=True))
assert doc.xpath('//h1/text()')[0] == "Sorry, we’re experiencing technical difficulties"
def test_empty_messages_exist_if_no_files_returned(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
assert response.status_code == 200
doc = html.fromstring(response.get_data(as_text=True))
self._assert_page_title_and_table_headings(doc, check_for_tables=False)
response_text = self.strip_all_whitespace(response.get_data(as_text=True))
assert (
self.strip_all_whitespace('<p class="govuk-body">No communications have been sent out.</p>')
in response_text
)
assert (
self.strip_all_whitespace(
'<p class="govuk-body">No clarification questions and answers have been posted yet.</p>'
)
in response_text
)
def test_dates_for_open_framework_closed_for_questions(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open', clarification_questions_open=False)
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
data = response.get_data(as_text=True)
assert response.status_code == 200
assert 'All clarification questions and answers will be published ' \
'by 5pm BST, Tuesday 29 September 2015.' in data
assert "You can ask clarification questions until " not in data
def test_dates_for_open_framework_open_for_questions(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open', clarification_questions_open=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
data = response.get_data(as_text=True)
assert response.status_code == 200
assert 'All clarification questions and answers will be published ' \
'by 5pm BST, Tuesday 29 September 2015.' not in data
assert 'You can ask clarification questions until 5pm BST, Tuesday 22 September 2015.' in data
def test_the_tables_should_be_displayed_correctly(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
files = [
('updates/communications/', 'file 1', 'odt'),
('updates/communications/', 'file 2', 'odt'),
('updates/clarifications/', 'file 3', 'odt'),
('updates/clarifications/', 'file 4', 'odt'),
]
# the communications table is always before the clarifications table
s3.return_value.list.return_value = [
_return_fake_s3_file_dict("g-cloud-7/communications/{}".format(section), filename, ext)
for section, filename, ext
in files
]
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
doc = html.fromstring(response.get_data(as_text=True))
self._assert_page_title_and_table_headings(doc)
tables = doc.xpath('//div[contains(@class, "updates-document-tables")]/table')
# test that for each table, we have the right number of rows
for table in tables:
item_rows = table.findall('.//tr[@class="summary-item-row"]')
assert len(item_rows) == 2
# test that the file names and urls are right
for row in item_rows:
section, filename, ext = files.pop(0)
filename_link = row.find('.//a[@class="document-link-with-icon"]')
assert filename in filename_link.text_content()
assert filename_link.get('href') == '/suppliers/frameworks/g-cloud-7/files/{}{}.{}'.format(
section,
filename.replace(' ', '%20'),
ext,
)
def test_names_with_the_section_name_in_them_will_display_correctly(self, s3):
self.data_api_client.get_framework.return_value = self.framework('open')
        # for example: 'g-cloud-7-updates/clarifications/communications%20file.odt'
files = [
('updates/communications/', 'clarifications file', 'odt'),
('updates/clarifications/', 'communications file', 'odt')
]
s3.return_value.list.return_value = [
_return_fake_s3_file_dict("g-cloud-7/communications/{}".format(section), filename, ext)
for section, filename, ext
in files
]
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
doc = html.fromstring(response.get_data(as_text=True))
self._assert_page_title_and_table_headings(doc)
tables = doc.xpath('//div[contains(@class, "updates-document-tables")]/table')
# test that for each table, we have the right number of rows
for table in tables:
item_rows = table.findall('.//tr[@class="summary-item-row"]')
assert len(item_rows) == 1
# test that the file names and urls are right
for row in item_rows:
section, filename, ext = files.pop(0)
filename_link = row.find('.//a[@class="document-link-with-icon"]')
assert filename in filename_link.text_content()
assert filename_link.get('href') == '/suppliers/frameworks/g-cloud-7/files/{}{}.{}'.format(
section,
filename.replace(' ', '%20'),
ext,
)
@pytest.mark.parametrize('countersigned_path, contact_link_shown', [("path", False), (None, True)])
def test_contact_link_only_shown_if_countersigned_agreement_is_not_yet_returned(
self, s3, countersigned_path, contact_link_shown
):
self.data_api_client.get_framework.return_value = self.framework('live', clarification_questions_open=False)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
countersigned_path=countersigned_path
)
self.login()
response = self.client.get('/suppliers/frameworks/g-cloud-7/updates')
data = response.get_data(as_text=True)
assert response.status_code == 200
assert ('Contact the support team' in data) == contact_link_shown
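# Tests for submitting a clarification question: a Notify email is sent to the support mailbox,
# a receipt email is sent to the supplier, and an audit event is recorded.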
@mock.patch('app.main.views.frameworks.DMNotifyClient.send_email', autospec=True)
class TestSendClarificationQuestionEmail(BaseApplicationTest):
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@mock.patch('dmutils.s3.S3')
def _send_email(self, s3, message):
self.login()
return self.client.post(
"/suppliers/frameworks/g-cloud-7/updates",
data={'clarification_question': message}
)
def test_should_call_send_email_with_correct_params_if_clarification_questions_open(self, notify_send_email):
self.data_api_client.get_framework.return_value = self.framework(
'open', name='Test Framework', clarification_questions_open=True
)
clarification_question = 'This is a clarification question.'
with freeze_time('2019-07-02 01:02:03'):
res = self._send_email(message=clarification_question)
# Assert Notify email 1 is sent (clarification question)
# Assert Notify email 2 is sent (receipt)
notify_send_email.assert_has_calls(
[
mock.call(
mock.ANY,
to_email_address="clarification-questions@example.gov.uk",
template_name_or_id='framework-clarification-question',
personalisation={
"framework_name": "Test Framework",
"supplier_id": 1234,
"supplier_name": "My Little Company",
"supplier_reference": "2019-07-02-JRX8IN",
"clarification_question": clarification_question,
},
reference=(
"fw-clarification-question-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"9B7i7y6lXFmVCHXyU7sP0nkdNK6l8B98xRimoHMzpAw="
),
allow_resend=True,
),
mock.call(
mock.ANY, # DMNotifyClient
to_email_address="email@email.com",
template_name_or_id='confirmation_of_clarification_question',
personalisation={
'user_name': 'Năme',
'framework_name': 'Test Framework',
"supplier_reference": "2019-07-02-JRX8IN",
'clarification_question_text': clarification_question,
},
reference=(
"fw-clarification-question-confirm-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50="
),
reply_to_address_id='24908180-b64e-513d-ab48-fdca677cec52',
)
]
)
# Assert audit event
self.data_api_client.create_audit_event.assert_called_with(
audit_type=AuditTypes.send_clarification_question,
user="email@email.com",
object_type="suppliers",
object_id=1234,
data={"question": clarification_question, 'framework': 'g-cloud-7'}
)
assert res.status_code == 200
# Assert flash message
doc = html.fromstring(res.get_data(as_text=True))
flash_message = doc.cssselect(".dm-alert")[0]
assert (
flash_message.cssselect(".dm-alert__title")[0].text.strip()
==
"Your clarification question has been sent. Answers to all "
"clarification questions will be published on this page."
)
def test_email_not_sent_if_clarification_questions_closed(self, notify_send_email):
self.data_api_client.get_framework.return_value = self.framework(
'open', name='Test Framework', clarification_questions_open=False
)
response = self._send_email(message='I have missed the clarification question deadline!')
assert response.status_code == 400
assert notify_send_email.called is False
assert self.data_api_client.create_audit_event.called is False
@pytest.mark.parametrize(
'invalid_clarification_question',
(
# Empty question
{'question': '', 'error_message': 'Add text if you want to ask a question.'},
# Whitespace only question
{'question': '\t \n\n\n', 'error_message': 'Add text if you want to ask a question.'},
# Question length > 5000 characters
{'question': ('ten__chars' * 500) + '1', 'error_message': 'Question cannot be longer than 5000 characters'}
)
)
def test_should_not_send_email_if_invalid_clarification_question(
self,
notify_send_email,
invalid_clarification_question,
):
self.data_api_client.get_framework.return_value = self.framework('open')
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework()
response = self._send_email(message=invalid_clarification_question['question'])
# Assert no audit
assert self.data_api_client.create_audit_event.call_count == 0
# Assert no emails sent
assert notify_send_email.call_count == 0
assert response.status_code == 400
# Assert error message shown
assert (
self.strip_all_whitespace('There was a problem with your submitted question')
in self.strip_all_whitespace(response.get_data(as_text=True))
)
assert (
self.strip_all_whitespace(invalid_clarification_question['error_message'])
in self.strip_all_whitespace(response.get_data(as_text=True))
)
def test_should_be_a_503_if_email_fails(self, notify_send_email):
self.data_api_client.get_framework.return_value = self.framework('open', name='Test Framework')
notify_send_email.side_effect = EmailError("Arrrgh")
clarification_question = 'This is a clarification question.'
with freeze_time('2019-07-02 01:02:03'):
response = self._send_email(message=clarification_question)
# Assert send_email is called only once
notify_send_email.assert_called_once_with(
mock.ANY,
to_email_address="clarification-questions@example.gov.uk",
template_name_or_id='framework-clarification-question',
personalisation={
"framework_name": "Test Framework",
"supplier_id": 1234,
"supplier_name": "My Little Company",
"supplier_reference": "2019-07-02-JRX8IN",
"clarification_question": clarification_question,
},
reference=(
"fw-clarification-question-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"9B7i7y6lXFmVCHXyU7sP0nkdNK6l8B98xRimoHMzpAw="
),
allow_resend=True,
)
# Assert no audit
assert self.data_api_client.create_audit_event.call_count == 0
assert response.status_code == 503
def test_should_fail_silently_if_receipt_email_fails(self, notify_send_email):
notify_send_email.side_effect = [None, EmailError("Arrrgh")]
self.data_api_client.get_framework.return_value = self.framework('open', name='Test Framework',
clarification_questions_open=True)
clarification_question = 'This is a clarification question.'
with freeze_time('2019-07-02 01:02:03'):
response = self._send_email(message=clarification_question)
# first email sends, second email fails
notify_send_email.assert_has_calls(
[
mock.call(
mock.ANY,
to_email_address="clarification-questions@example.gov.uk",
template_name_or_id="framework-clarification-question",
personalisation={
"framework_name": "Test Framework",
"supplier_id": 1234,
"supplier_name": "My Little Company",
"supplier_reference": "2019-07-02-JRX8IN",
"clarification_question": clarification_question,
},
reference=(
"fw-clarification-question-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"9B7i7y6lXFmVCHXyU7sP0nkdNK6l8B98xRimoHMzpAw="
),
allow_resend=True,
),
mock.call(
mock.ANY, # DMNotifyClient
to_email_address="email@email.com",
template_name_or_id='confirmation_of_clarification_question',
personalisation={
'user_name': 'Năme',
'framework_name': 'Test Framework',
"supplier_reference": "2019-07-02-JRX8IN",
'clarification_question_text': clarification_question,
},
reference=(
"fw-clarification-question-confirm-"
"42c1W5KnFy1IaDtDEnNsOChYYluckBo_mzTuRxQawFo=-"
"8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50="
),
reply_to_address_id='24908180-b64e-513d-ab48-fdca677cec52',
)
]
)
# assert reached end of view and redirected
assert response.status_code == 200
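# Tests for the lots overview page of a framework application, covering draft/complete service
# counts and the "application is not complete" banner linking to the declaration.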
@mock.patch('app.main.views.frameworks.count_unanswered_questions')
class TestFrameworkSubmissionLots(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.get_metadata_patch = mock.patch('app.main.views.frameworks.content_loader.get_metadata')
self.get_metadata = self.get_metadata_patch.start()
self.get_metadata.return_value = 'g-cloud-6'
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
self.get_metadata_patch.stop()
def test_drafts_list_progress_count(self, count_unanswered):
self.login()
count_unanswered.return_value = 3, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
assert u'1 draft service' in submissions.get_data(as_text=True)
assert u'complete service' not in submissions.get_data(as_text=True)
@pytest.mark.parametrize('framework_slug, show_service_data', (
('digital-outcomes-and-specialists-2', 0),
('g-cloud-9', 1),
))
def test_submission_lots_page_shows_use_of_service_data_if_g_cloud_family(
self, count_unanswered, framework_slug, show_service_data
):
self.login()
self.data_api_client.get_framework.return_value = self.framework(slug=framework_slug, status="open")
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
framework_slug=framework_slug
)
res = self.client.get(f"/suppliers/frameworks/{framework_slug}/submissions")
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
use_of_data = doc.xpath('//div[contains(@class, "use-of-service-data")]')
assert len(use_of_data) == show_service_data
if show_service_data:
assert 'The service information you provide here:' in use_of_data[0].text_content()
@pytest.mark.parametrize(
'declaration, should_show_declaration_link, declaration_link_url',
(
({'declaration': {}}, True, '/suppliers/frameworks/g-cloud-7/declaration/start'),
({'declaration': {'status': 'started'}}, True, '/suppliers/frameworks/g-cloud-7/declaration'),
({'declaration': {}}, True, '/suppliers/frameworks/g-cloud-7/declaration/start'),
({'declaration': {'status': 'started'}}, True, '/suppliers/frameworks/g-cloud-7/declaration'),
({'declaration': {'status': 'complete'}}, False, None),
({'declaration': {'status': 'complete'}}, False, None),
)
)
def test_banner_on_submission_lot_page_shows_link_to_declaration(
self, count_unanswered, declaration, should_show_declaration_link, declaration_link_url
):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.get_supplier_declaration.return_value = declaration
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
if should_show_declaration_link:
doc = html.fromstring(submissions.get_data(as_text=True))
assert doc.xpath('//*[@class="banner-information-without-action"]')
decl_element = doc.xpath(
"//*[contains(@class,'banner-content')][contains(normalize-space(string()), $text)]",
text="make your supplier declaration",
)
assert decl_element[0].xpath('.//a[@href=$url]', url=declaration_link_url)
else:
# Application is done - don't show warning banner
assert "Your application is not complete" not in submissions.get_data(as_text=True)
@pytest.mark.parametrize(
"incomplete_declaration,expected_url",
(
({}, "/suppliers/frameworks/g-cloud-7/declaration/start"),
({"status": "started"}, "/suppliers/frameworks/g-cloud-7/declaration")
)
)
def test_drafts_list_completed(self, count_unanswered, incomplete_declaration, expected_url):
self.login()
count_unanswered.return_value = 0, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': incomplete_declaration}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
submissions_html = submissions.get_data(as_text=True)
assert u'1 service marked as complete' in submissions_html
assert u'draft service' not in submissions_html
assert "Your application is not complete" in submissions_html
doc = html.fromstring(submissions_html)
assert doc.xpath('//*[@class="banner-information-without-action"]')
decl_element = doc.xpath(
"//*[contains(@class,'banner-content')][contains(normalize-space(string()), $text)]",
text="make your supplier declaration",
)
assert decl_element[0].xpath('.//a[@href=$url]', url=expected_url)
def test_drafts_list_completed_with_declaration_status(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
submissions_html = submissions.get_data(as_text=True)
assert u'1 service will be submitted' in submissions_html
assert u'1 complete service was submitted' not in submissions_html
assert u'browse-list-item-status-happy' in submissions_html
assert "Your application is not complete" not in submissions_html
def test_drafts_list_services_were_submitted(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'},
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'},
]
submissions = self.client.get('/suppliers/frameworks/g-cloud-7/submissions')
assert u'1 complete service was submitted' in submissions.get_data(as_text=True)
def test_dos_drafts_list_with_open_framework(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
slug='digital-outcomes-and-specialists',
status='open'
)
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'digital-specialists', 'status': 'submitted'}
]
submissions = self.client.get('/suppliers/frameworks/digital-outcomes-and-specialists/submissions')
assert u'This will be submitted' in submissions.get_data(as_text=True)
assert u'browse-list-item-status-happy' in submissions.get_data(as_text=True)
assert u'Apply to provide' in submissions.get_data(as_text=True)
assert "Your application is not complete" not in submissions.get_data(as_text=True)
def test_dos_drafts_list_with_closed_framework(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
slug="digital-outcomes-and-specialists",
status='pending'
)
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'digital-specialists', 'status': 'not-submitted'},
{'serviceName': 'draft', 'lotSlug': 'digital-specialists', 'status': 'submitted'},
]
submissions = self.client.get('/suppliers/frameworks/digital-outcomes-and-specialists/submissions')
assert submissions.status_code == 200
assert u'Submitted' in submissions.get_data(as_text=True)
assert u'Apply to provide' not in submissions.get_data(as_text=True)
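# Tests for the G-Cloud 12 recovery draft-services page, which is only available to specific
# recovery suppliers and only once the framework is live.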
@mock.patch('app.main.views.frameworks.count_unanswered_questions')
class TestG12RecoveryDraftServices(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.get_metadata_patch = mock.patch('app.main.views.frameworks.content_loader.get_metadata')
self.get_metadata = self.get_metadata_patch.start()
self.get_metadata.return_value = 'g-cloud-12'
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
self.get_metadata_patch.stop()
@pytest.mark.parametrize('status, response_code', [('open', 404), ('live', 200)])
def test_page_exists(self, count_unanswered, status, response_code):
self.login(supplier_id=577184)
self.data_api_client.get_framework.return_value = self.framework(slug='g-cloud-12', status=status)
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
assert response.status_code == response_code
def test_page_exists_only_for_g_cloud_12(self, count_unanswered):
self.login(supplier_id=577184)
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-11/draft-services')
assert response.status_code == 404
@pytest.mark.parametrize('supplier_id, response_code', [(1, 404), (577184, 200)])
def test_page_exists_for_recovery_suppliers_only(self, count_unanswered, supplier_id, response_code):
self.login(supplier_id=supplier_id)
self.data_api_client.get_framework.return_value = self.framework(slug='g-cloud-12', status='live')
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
assert response.status_code == response_code
def test_page_renders(self, count_unanswered):
self.login(supplier_id=577184)
self.data_api_client.get_framework.return_value = FrameworkStub(slug="g-cloud-12", status="live")\
.single_result_response()
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
doc = html.fromstring(response.get_data(as_text=True))
assert doc.cssselect("h1:contains('Your G-Cloud 12 services')")
assert [el.text for el in doc.cssselect(".browse-list a")] == [
"Cloud hosting", "Cloud software", "Cloud support"
]
def test_lot_status_includes_number_of_draft_and_completed_services(self, count_unanswered):
self.login(supplier_id=577184)
self.data_api_client.get_framework.return_value = FrameworkStub(slug="g-cloud-12", status="live")\
.single_result_response()
self.data_api_client.get_supplier_declaration.return_value = {'declaration': {'status': 'complete'}}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'cloud-hosting', 'status': 'not-submitted'},
{'serviceName': 'completed', 'lotSlug': 'cloud-hosting', 'status': 'submitted'},
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
with self.app.app_context():
response = self.client.get('/suppliers/frameworks/g-cloud-12/draft-services')
raw_html = response.get_data(as_text=True)
assert "1 draft service" in raw_html
assert "1 service will be submitted" in raw_html
assert "1 complete service was submitted" not in raw_html
assert 'browse-list-item-status-happy' in raw_html
assert "Your application is not complete" not in raw_html
@mock.patch('app.main.views.frameworks.count_unanswered_questions')
class TestFrameworkSubmissionServices(BaseApplicationTest, MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
def setup_method(self, method):
super().setup_method(method)
self.get_metadata_patch = mock.patch('app.main.views.frameworks.content_loader.get_metadata')
self.get_metadata = self.get_metadata_patch.start()
self.get_metadata.return_value = 'g-cloud-6'
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
self.get_metadata_patch.stop()
def _assert_incomplete_application_banner_not_visible(self, html):
assert "Your application is not complete" not in html
def _assert_incomplete_application_banner(self,
response_html,
decl_item_href=None):
doc = html.fromstring(response_html)
assert "Your application is not complete" in response_html
assert doc.xpath('//*[@class="banner-information-without-action"]')
decl_element = doc.xpath(
"//*[contains(@class,'banner-content')][contains(normalize-space(string()), $text)]",
text="make your supplier declaration",
)
assert decl_element
if decl_item_href:
assert decl_element[0].xpath('.//a[@href=$url]', url=decl_item_href)
@pytest.mark.parametrize(
'framework_status, msg',
[
('open', 'Add a service'),
('pending', 'You didn’t mark any services as complete.')
]
)
def test_services_list_open_or_pending_no_complete_services(self, count_unanswered, framework_status, msg):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status=framework_status)
self.data_api_client.find_draft_services_iter.return_value = []
count_unanswered.return_value = 0
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/iaas')
assert response.status_code == 200
assert msg in response.get_data(as_text=True)
@pytest.mark.parametrize('framework_status', ['open', 'pending'])
def test_services_list_open_or_pending_and_no_declaration(self, count_unanswered, framework_status):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status=framework_status)
self.data_api_client.get_supplier_declaration.return_value = {
"declaration": {"status": "started"}
}
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/iaas')
assert response.status_code == 200
assert 'You made your supplier declaration' not in response.get_data(as_text=True)
def test_services_list_shows_g7_message_if_pending_and_application_made(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_declaration.return_value = self.supplier_framework()['frameworkInterest']
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
count_unanswered.return_value = 0, 1
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(response.get_data(as_text=True))
assert response.status_code == 200
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
assert "You made your supplier declaration and submitted 1 complete service." in \
heading[0].xpath('../p[1]/text()')[0]
self._assert_incomplete_application_banner_not_visible(response.get_data(as_text=True))
def test_shows_g7_message_if_pending_and_services_not_submitted(self, count_unanswered):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='pending')
self.data_api_client.get_supplier_declaration.return_value = self.supplier_framework()['frameworkInterest']
self.data_api_client.get_supplier.return_value = SupplierStub().single_result_response()
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
count_unanswered.return_value = 0, 1
response = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(response.get_data(as_text=True))
assert response.status_code == 200
heading = doc.xpath('//div[@class="summary-item-lede"]//h2[@class="summary-item-heading"]')
assert len(heading) > 0
assert "G-Cloud 7 is closed for applications" in heading[0].xpath('text()')[0]
assert "You made your supplier declaration and submitted 0 complete services." in \
heading[0].xpath('../p[1]/text()')[0]
assert "These services were not completed" in doc.xpath('//main//p[@class="hint"]')[0].xpath('text()')[0]
self._assert_incomplete_application_banner_not_visible(response.get_data(as_text=True))
def test_drafts_list_progress_count(self, count_unanswered):
self.login()
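        # count_unanswered_questions is mocked to return (required, optional) unanswered counts, so 3 + 1 is reported as "4 unanswered questions".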
count_unanswered.return_value = 3, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
lot_page = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
assert u'Service can be moved to complete' not in lot_page.get_data(as_text=True)
assert u'4 unanswered questions' in lot_page.get_data(as_text=True)
def test_drafts_list_can_be_completed(self, count_unanswered):
self.login()
count_unanswered.return_value = 0, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'not-submitted'}
]
res = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
assert u'Service can be marked as complete' in res.get_data(as_text=True)
assert u'1 optional question unanswered' in res.get_data(as_text=True)
@pytest.mark.parametrize(
"incomplete_declaration,expected_url",
(
({}, "/suppliers/frameworks/g-cloud-7/declaration/start"),
({"status": "started"}, "/suppliers/frameworks/g-cloud-7/declaration")
)
)
def test_drafts_list_completed(self, count_unanswered, incomplete_declaration, expected_url):
self.login()
count_unanswered.return_value = 0, 1
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.data_api_client.get_supplier_declaration.return_value = {'declaration': incomplete_declaration}
self.data_api_client.find_draft_services_iter.return_value = [
{'serviceName': 'draft', 'lotSlug': 'scs', 'status': 'submitted'}
]
self.data_api_client.get_supplier.return_value = SupplierStub(
company_details_confirmed=False
).single_result_response()
lot_page = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
lot_page_html = lot_page.get_data(as_text=True)
assert u'Service can be moved to complete' not in lot_page_html
self._assert_incomplete_application_banner(lot_page_html, decl_item_href=expected_url)
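    # Each 'copied' tuple marks whether the three published G-Cloud 6 services have already been copied across; the "add your services" link is shown while any remain uncopied.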
@pytest.mark.parametrize(
('copied', 'link_shown'),
(
((False, False, False), True),
((True, False, True), True),
((True, True, True), False),
)
)
def test_drafts_list_has_link_to_add_published_services_if_any_services_not_yet_copied(
self, count_unanswered, copied, link_shown
):
self.data_api_client.find_services.return_value = {
'services': [
{'question1': 'answer1', 'copiedToFollowingFramework': copied[0]},
{'question2': 'answer2', 'copiedToFollowingFramework': copied[1]},
{'question2': 'answer2', 'copiedToFollowingFramework': copied[2]},
],
}
self.data_api_client.get_framework.return_value = self.framework(status='open')
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(res.get_data(as_text=True))
link = doc.xpath(
"//*[@id='main-content']/p[1]/a[normalize-space(string())='View and add your services from G-Cloud\xa07']"
)
assert self.data_api_client.find_services.call_args_list == [
mock.call(
supplier_id=1234,
framework='g-cloud-6',
lot='scs',
status='published',
)
]
if link_shown:
assert link
assert '/suppliers/frameworks/g-cloud-7/submissions/scs/previous-services' in link[0].values()
else:
assert not link
def test_link_to_add_previous_services_not_shown_if_no_defined_previous_framework(self, count_unanswered):
self.get_metadata.side_effect = ContentNotFoundError('Not found')
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-7/submissions/scs')
doc = html.fromstring(res.get_data(as_text=True))
assert not doc.xpath("//a[normalize-space(string())='View and add your services from G-Cloud\xa07']")
def test_redirect_to_previous_services_for_lot_with_one_service_limit_and_no_drafts_and_previous_service_to_copy(
self, count_unanswered
):
self.data_api_client.get_framework.return_value = self.framework(slug='digital-outcomes-and-specialists-3')
self.data_api_client.find_draft_services_iter.return_value = []
self.get_metadata.return_value = 'digital-outcomes-and-specialists-2'
self.data_api_client.find_services.return_value = {"services": [{"copiedToFollowingFramework": False}]}
self.login()
res = self.client.get('/suppliers/frameworks/digital-outcomes-and-specialists-3/submissions/digital-outcomes')
assert res.status_code == 302
assert '/digital-outcomes-and-specialists-3/submissions/digital-outcomes/previous-services' in res.location
def test_500s_if_previous_framework_not_found(self, count_unanswered):
self.data_api_client.get_framework.side_effect = [
self.framework(slug='g-cloud-10'),
HTTPError(mock.Mock(status_code=404)),
]
self.data_api_client.find_draft_services_iter.return_value = []
self.login()
res = self.client.get('/suppliers/frameworks/g-cloud-10/submissions/cloud-hosting')
assert res.status_code == 500
class TestContractVariation(BaseApplicationTest):
def setup_method(self, method):
super(TestContractVariation, self).setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.good_supplier_framework = self.supplier_framework(
declaration={'nameOfOrganisation': 'A.N. Supplier',
'primaryContactEmail': 'bigboss@email.com'},
on_framework=True,
agreement_returned=True,
agreement_details={}
)
self.g8_framework = self.framework(
name='G-Cloud 8',
slug='g-cloud-8',
status='live',
framework_agreement_version='3.1'
)
self.g8_framework['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
self.g9_framework = self.framework(
name='G-Cloud 9',
slug='g-cloud-9',
status='live',
framework_agreement_version='3.1'
)
self.g9_framework['frameworks']['variations'] = {"1": {"createdAt": "2018-08-16"}}
self.login()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_get_page_renders_if_all_ok(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
doc = html.fromstring(res.get_data(as_text=True))
assert res.status_code == 200
assert len(doc.xpath('//h1[contains(text(), "Accept the contract variation for G-Cloud 8")]')) == 1
def test_supplier_must_be_on_framework(self):
supplier_not_on_framework = self.good_supplier_framework.copy()
supplier_not_on_framework['frameworkInterest']['onFramework'] = False
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = supplier_not_on_framework
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
assert res.status_code == 404
def test_variation_must_exist(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
# There is no variation number 2
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/2")
assert res.status_code == 404
def test_agreement_must_be_returned_already(self):
agreement_not_returned = self.good_supplier_framework.copy()
agreement_not_returned['frameworkInterest']['agreementReturned'] = False
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = agreement_not_returned
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
assert res.status_code == 404
def test_shows_form_if_not_yet_agreed(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
doc = html.fromstring(res.get_data(as_text=True))
assert res.status_code == 200
assert len(doc.xpath('//label[contains(text(), "I accept these changes")]')) == 1
assert len(doc.xpath('//button[normalize-space(string())=$t]', t="I accept")) == 1
def test_shows_signer_details_and_no_form_if_already_agreed(self):
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
}}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
page_text = res.get_data(as_text=True)
doc = html.fromstring(page_text)
assert res.status_code == 200
assert len(doc.xpath('//h2[contains(text(), "Contract variation status")]')) == 1
assert (
"<span>William Drăyton<br />agreed@email.com<br />Friday 19 August 2016 at 4:47pm BST</span>" in page_text
)
assert "<span>Waiting for CCS to countersign</span>" in page_text
assert len(doc.xpath('//label[contains(text(), "I accept these proposed changes")]')) == 0
assert len(doc.xpath('//input[@value="I accept"]')) == 0
def test_shows_signer_details_and_different_text_if_already_agreed_but_no_countersign(self):
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
}}
self.data_api_client.get_framework.return_value = self.g9_framework
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.get("/suppliers/frameworks/g-cloud-9/contract-variation/1")
page_text = res.get_data(as_text=True)
doc = html.fromstring(page_text)
assert res.status_code == 200
assert len(doc.xpath('//h1[contains(text(), "The contract variation for G-Cloud 9")]')) == 1
assert len(doc.xpath('//h2[contains(text(), "Contract variation status")]')) == 1
assert (
"<span>William Drăyton<br />agreed@email.com<br />Friday 19 August 2016 at 4:47pm BST</span>" in page_text
)
assert "<span>Waiting for CCS to countersign</span>" in page_text
assert "You have accepted the Crown Commercial Service’s changes to the framework agreement" in page_text
assert "They will come into effect when CCS has countersigned them." in page_text
assert len(doc.xpath('//label[contains(text(), "I accept these proposed changes")]')) == 0
assert len(doc.xpath('//input[@value="I accept"]')) == 0
def test_shows_updated_heading_and_countersigner_details_but_no_form_if_countersigned(self):
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drăyton",
}}
g8_with_countersigned_variation = self.framework(status='live', name='G-Cloud 8')
g8_with_countersigned_variation['frameworks']['variations'] = {"1": {
"createdAt": "2016-08-01T12:30:00.000000Z",
"countersignedAt": "2016-10-01T02:00:00.000000Z",
"countersignerName": "A.N. Other",
"countersignerRole": "Head honcho",
}
}
self.data_api_client.get_framework.return_value = g8_with_countersigned_variation
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.get("/suppliers/frameworks/g-cloud-8/contract-variation/1")
page_text = res.get_data(as_text=True)
doc = html.fromstring(page_text)
assert res.status_code == 200
assert len(doc.xpath('//h1[contains(text(), "The contract variation for G-Cloud 8")]')) == 1
assert len(doc.xpath('//h2[contains(text(), "Contract variation status")]')) == 1
assert "<span>A.N. Other<br />Head honcho<br />Saturday 1 October 2016</span>" in page_text
assert len(doc.xpath('//label[contains(text(), "I accept these proposed changes")]')) == 0
assert len(doc.xpath('//input[@value="I accept"]')) == 0
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True, create=True)
def test_email_is_sent_to_correct_users(self, mocked_notify_class):
mocked_notify_client = mocked_notify_class.return_value
mocked_notify_client.templates = {'g-cloud-8_variation_1_agreed': 123456789}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"}
)
assert res.status_code == 302
assert res.location == "http://localhost/suppliers/frameworks/g-cloud-8/contract-variation/1"
self.data_api_client.agree_framework_variation.assert_called_once_with(
1234, 'g-cloud-8', '1', 123, 'email@email.com'
)
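        # The expected reference strings are the fixed per-recipient values the notify client generates; the two calls differ only by recipient email address.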
boss_email = mock.call(
'bigboss@email.com', template_name_or_id=123456789, personalisation={'framework_name': 'g-cloud-8'},
reference="contract-variation-agreed-confirmation-ouj_ZOpWHvitNdb7O7DDQGEB-lstuMfj9oEl5oWU4C0="
)
regular_email = mock.call(
'email@email.com', template_name_or_id=123456789, personalisation={'framework_name': 'g-cloud-8'},
reference="contract-variation-agreed-confirmation-8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50="
)
mocked_notify_client.send_email.assert_has_calls([boss_email, regular_email], any_order=False)
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_only_one_email_sent_if_user_is_framework_contact(self, mocked_notify_class):
same_email_as_current_user = self.good_supplier_framework.copy()
same_email_as_current_user['frameworkInterest']['declaration']['primaryContactEmail'] = 'email@email.com'
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = same_email_as_current_user
mocked_notify_client = mocked_notify_class.return_value
mocked_notify_client.templates = {'g-cloud-8_variation_1_agreed': 123456789}
self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"}
)
mocked_notify_client.send_email.assert_called_once_with(
to_email_address='email@email.com',
personalisation={'framework_name': 'g-cloud-8'},
template_name_or_id=123456789,
reference='contract-variation-agreed-confirmation-8yc90Y2VvBnVHT5jVuSmeebxOCRJcnKicOe7VAsKu50='
)
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_success_message_is_displayed_on_success(self, mocked_notify_class):
mocked_notify_client = mocked_notify_class.return_value
mocked_notify_client.templates = {'g-cloud-8_variation_1_agreed': 123456789}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"},
follow_redirects=True
)
doc = html.fromstring(res.get_data(as_text=True))
assert mocked_notify_client.send_email.called
assert res.status_code == 200
assert len(
doc.cssselect(".dm-alert:contains('You have accepted the proposed changes.')")
) == 1
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_api_is_not_called_and_no_email_sent_for_subsequent_posts(self, mocked_notify_class):
mocked_notify_client = mocked_notify_class.return_value
already_agreed = self.good_supplier_framework.copy()
already_agreed['frameworkInterest']['agreedVariations'] = {
"1": {
"agreedAt": "2016-08-19T15:47:08.116613Z",
"agreedUserId": 1,
"agreedUserEmail": "agreed@email.com",
"agreedUserName": "William Drayton",
}
}
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = already_agreed
res = self.client.post(
"/suppliers/frameworks/g-cloud-8/contract-variation/1",
data={"accept_changes": "Yes"}
)
assert res.status_code == 200
assert self.data_api_client.agree_framework_variation.called is False
assert mocked_notify_client.called is False
def test_error_if_box_not_ticked(self):
self.data_api_client.get_framework.return_value = self.g8_framework
self.data_api_client.get_supplier_framework_info.return_value = self.good_supplier_framework
res = self.client.post("/suppliers/frameworks/g-cloud-8/contract-variation/1", data={})
doc = html.fromstring(res.get_data(as_text=True))
assert res.status_code == 400
validation_message = "You need to accept these changes to continue."
assert len(
doc.xpath('//span[@class="validation-message"][contains(text(), "{}")]'.format(validation_message))
) == 1
class TestReuseFrameworkSupplierDeclaration(BaseApplicationTest,
MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
"""Tests for frameworks/<framework_slug>/declaration/reuse view."""
def setup_method(self, method):
super(TestReuseFrameworkSupplierDeclaration, self).setup_method(method)
self.login()
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.framework_stub = FrameworkStub(
name='g-cloud-8',
slug='g-cloud-8',
allow_declaration_reuse=True,
applications_close_at=datetime(2009, 12, 3, 1, 1, 1)
).single_result_response()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_reusable_declaration_framework_slug_param(self):
"""Ensure that when using the param to specify declaration we collect the correct declaration."""
self.data_api_client.get_framework.return_value = self.framework_stub
self.data_api_client.get_supplier_framework_info.return_value = {
'frameworkInterest': {'declaration': {'status': 'complete'}, 'onFramework': True}
}
resp = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/reuse?reusable_declaration_framework_slug=g-cloud-8'
)
assert resp.status_code == 200
self.data_api_client.get_framework.assert_has_calls([mock.call('g-cloud-9'), mock.call('g-cloud-8')])
self.data_api_client.get_supplier_framework_info.assert_called_once_with(1234, 'g-cloud-8')
def test_404_when_specified_declaration_not_found(self):
"""Fail on a 404 if declaration is specified but not found."""
self.data_api_client.get_framework.return_value = {'frameworks': {'status': 'open'}}
self.data_api_client.get_supplier_framework_info.side_effect = APIError(mock.Mock(status_code=404))
resp = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/reuse?reusable_declaration_framework_slug=g-cloud-8'
)
assert resp.status_code == 404
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9'),
]
self.data_api_client.get_supplier_framework_info.assert_called_once_with(1234, 'g-cloud-8')
def test_redirect_when_declaration_not_found(self):
"""Redirect if a reusable declaration is not found."""
self.data_api_client.get_framework.return_value = self.framework_stub
frameworks = [
FrameworkStub(
name='ben-cloud-2',
allow_declaration_reuse=True,
applications_close_at=datetime(2009, 3, 3, 1, 1, 1)
).response()
]
supplier_declarations = []
self.data_api_client.find_frameworks.return_value = {'frameworks': frameworks}
self.data_api_client.find_supplier_declarations.return_value = dict(
frameworkInterest=supplier_declarations
)
resp = self.client.get(
'/suppliers/frameworks/g-cloud-9/declaration/reuse',
)
assert resp.location.endswith('/suppliers/frameworks/g-cloud-9/declaration')
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('g-cloud-9'),
]
self.data_api_client.find_supplier_declarations.assert_called_once_with(1234)
def test_success_reuse_g_cloud_7_for_8(self):
"""Test success path."""
t09 = datetime(2009, 3, 3, 1, 1, 1)
t10 = datetime(2010, 3, 3, 1, 1, 1)
t11 = datetime(2011, 3, 3, 1, 1, 1)
t12 = datetime(2012, 3, 3, 1, 1, 1)
frameworks_response = [
FrameworkStub(slug='g-cloud-8', allow_declaration_reuse=True, applications_close_at=t12).response(),
FrameworkStub(slug='g-cloud-7', allow_declaration_reuse=True, applications_close_at=t11).response(),
FrameworkStub(
slug='digital-outcomes-and-specialists', allow_declaration_reuse=True, applications_close_at=t10
).response(),
FrameworkStub(slug='g-cloud-6', allow_declaration_reuse=True, applications_close_at=t09).response(),
]
framework_response = FrameworkStub(
slug='g-cloud-8', allow_declaration_reuse=True, applications_close_at=t09).response()
supplier_declarations_response = [
{'x': 'foo', 'frameworkSlug': 'g-cloud-6', 'declaration': {'status': 'complete'}, 'onFramework': True},
{'x': 'foo', 'frameworkSlug': 'g-cloud-7', 'declaration': {'status': 'complete'}, 'onFramework': True},
{'x': 'foo', 'frameworkSlug': 'dos', 'declaration': {'status': 'complete'}, 'onFramework': True}
]
self.data_api_client.find_frameworks.return_value = {'frameworks': frameworks_response}
self.data_api_client.get_framework.return_value = {'frameworks': framework_response}
self.data_api_client.find_supplier_declarations.return_value = {
'frameworkInterest': supplier_declarations_response
}
resp = self.client.get(
'/suppliers/frameworks/g-cloud-8/declaration/reuse',
)
assert resp.status_code == 200
expected = 'In March 2011, your organisation completed a declaration for G-Cloud 7.'
assert expected in str(resp.data)
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-8'),
mock.call('g-cloud-8'),
]
self.data_api_client.find_supplier_declarations.assert_called_once_with(1234)
class TestReuseFrameworkSupplierDeclarationPost(BaseApplicationTest,
MockEnsureApplicationCompanyDetailsHaveBeenConfirmedMixin):
"""Tests for frameworks/<framework_slug>/declaration/reuse POST view."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
self.login()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
def test_reuse_false(self):
"""Assert that the redirect happens and the client sets the prefill pref to None."""
self.data_api_client.get_framework.return_value = self.framework()
data = {'reuse': 'False', 'old_framework_slug': 'should-not-be-used'}
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.location.endswith('/suppliers/frameworks/g-cloud-9/declaration')
self.data_api_client.set_supplier_framework_prefill_declaration.assert_called_once_with(
1234,
'g-cloud-9',
None,
'email@email.com'
)
def test_reuse_true(self):
"""Assert that the redirect happens and the client sets the prefill pref to the desired framework slug."""
data = {'reuse': True, 'old_framework_slug': 'digital-outcomes-and-specialists-2'}
self.data_api_client.get_supplier_framework_info.return_value = {
'frameworkInterest': {
'x_field': 'foo',
'frameworkSlug': 'digital-outcomes-and-specialists-2',
'declaration': {'status': 'complete'},
'onFramework': True
}
}
framework_response = {'frameworks': {'status': 'open', 'x_field': 'foo', 'allowDeclarationReuse': True}}
self.data_api_client.get_framework.return_value = framework_response
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.location.endswith('/suppliers/frameworks/g-cloud-9/declaration')
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists-2'),
]
self.data_api_client.get_supplier_framework_info.assert_called_once_with(
1234,
'digital-outcomes-and-specialists-2'
)
self.data_api_client.set_supplier_framework_prefill_declaration.assert_called_once_with(
1234,
'g-cloud-9',
'digital-outcomes-and-specialists-2',
'email@email.com'
)
def test_reuse_invalid_framework_post(self):
"""Assert 404 for non reusable framework."""
data = {'reuse': 'true', 'old_framework_slug': 'digital-outcomes-and-specialists'}
# A framework with allowDeclarationReuse as False
self.data_api_client.get_framework.return_value = {
'frameworks': {'status': 'open', 'x_field': 'foo', 'allowDeclarationReuse': False}
}
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists'),
]
assert not self.data_api_client.get_supplier_framework_info.called
assert resp.status_code == 404
def test_reuse_non_existent_framework_post(self):
"""Assert 404 for non existent framework."""
data = {'reuse': 'true', 'old_framework_slug': 'digital-outcomes-and-specialists-1000000'}
        # The framework we try to reuse from does not exist.
self.data_api_client.get_framework.side_effect = [self.framework(), HTTPError()]
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.status_code == 404
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists-1000000')
]
# Should not do the declaration call if the framework is invalid.
assert not self.data_api_client.get_supplier_framework_info.called
def test_reuse_non_existent_declaration_post(self):
"""Assert 404 for non existent declaration."""
data = {'reuse': 'true', 'old_framework_slug': 'digital-outcomes-and-specialists-2'}
framework_response = {'frameworks': {'status': 'open', 'x_field': 'foo', 'allowDeclarationReuse': True}}
self.data_api_client.get_framework.return_value = framework_response
self.data_api_client.get_supplier_framework_info.side_effect = HTTPError()
# Do the post.
resp = self.client.post('/suppliers/frameworks/g-cloud-9/declaration/reuse', data=data)
assert resp.status_code == 404
# Should get the framework
assert self.data_api_client.get_framework.call_args_list == [
mock.call('g-cloud-9'),
mock.call('digital-outcomes-and-specialists-2'),
]
# Should error getting declaration.
self.data_api_client.get_supplier_framework_info.assert_called_once_with(
1234, 'digital-outcomes-and-specialists-2'
)
class TestReuseFrameworkSupplierDeclarationForm(BaseApplicationTest):
"""Tests for app.main.forms.frameworks.ReuseDeclarationForm form."""
@pytest.mark.parametrize('falsey_value', ('False', '', 'false'))
def test_false_values(self, falsey_value):
with self.app.test_request_context():
data = MultiDict({'framework_slug': 'digital-outcomes-and-specialists', 'reuse': falsey_value})
form = ReuseDeclarationForm(data)
assert form.reuse.data is False
class TestSignatureLegalAuthority(BaseApplicationTest):
"""Tests for app.main.views.frameworks.legal_authority."""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize(
('framework_status', 'status_code'),
(
('coming', 404),
('open', 404),
('pending', 404),
('standstill', 200),
('live', 200),
('expired', 404),
)
)
def test_only_works_for_live_and_standstill_frameworks(self, framework_status, status_code):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status=framework_status,
slug='g-cloud-12',
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.get("/suppliers/frameworks/g-cloud-12/start-framework-agreement-signing")
assert res.status_code == status_code
@pytest.mark.parametrize(
('is_e_signature_supported', 'on_framework', 'status_code'),
(
(False, True, 404),
(True, True, 200),
(True, False, 400),
)
)
def test_only_works_for_supported_frameworks(self, is_e_signature_supported, on_framework, status_code):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status='standstill',
slug='g-cloud-12',
is_e_signature_supported=is_e_signature_supported)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=on_framework)
res = self.client.get(f"/suppliers/frameworks/g-cloud-12/start-framework-agreement-signing")
assert res.status_code == status_code
def test_post_yes_redirects_to_signing_page(self):
framework_slug = 'g-cloud-12'
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug=framework_slug,
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post(f"/suppliers/frameworks/{framework_slug}/start-framework-agreement-signing",
data={'legal_authority': 'yes'})
assert res.status_code == 302
        assert res.location == 'http://localhost/suppliers/frameworks/g-cloud-12/sign-framework-agreement'
def test_post_no_shows_info(self):
framework_slug = 'g-cloud-12'
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug=framework_slug,
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post(f"/suppliers/frameworks/{framework_slug}/start-framework-agreement-signing",
data={'legal_authority': 'no'})
assert res.status_code == 200
assert "You cannot sign the Framework Agreement" in res.get_data(as_text=True)
def test_post_no_response_shows_error(self):
framework_slug = 'g-cloud-12'
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug=framework_slug,
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post(f"/suppliers/frameworks/{framework_slug}/start-framework-agreement-signing",
data={})
assert res.status_code == 400
assert "Select yes if you have the legal authority to sign on behalf of your company" in res.get_data(
as_text=True)
class TestSignFrameworkAgreement(BaseApplicationTest):
"""Tests for app.main.views.frameworks.sign_framework_agreement"""
def setup_method(self, method):
super().setup_method(method)
self.data_api_client_patch = mock.patch('app.main.views.frameworks.data_api_client', autospec=True)
self.data_api_client = self.data_api_client_patch.start()
def teardown_method(self, method):
self.data_api_client_patch.stop()
super().teardown_method(method)
@pytest.mark.parametrize(
('is_e_signature_supported', 'on_framework', 'status_code'),
(
(False, True, 404),
(True, True, 200),
(True, False, 400),
)
)
def test_only_works_for_supported_frameworks(self, is_e_signature_supported, on_framework, status_code):
self.login()
self.data_api_client.get_framework.return_value = self.framework(
status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=is_e_signature_supported)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=on_framework)
res = self.client.get(f"/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
assert res.status_code == status_code
@pytest.mark.parametrize(
('framework_status', 'status_code'),
(
('coming', 404),
('open', 404),
('pending', 404),
('standstill', 200),
('live', 200),
('expired', 404),
)
)
def test_only_works_for_live_and_standstill_frameworks(self, framework_status, status_code):
self.data_api_client.get_framework.return_value = self.framework(status=framework_status,
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
assert res.status_code == status_code
def test_shows_error_messages(self):
self.login()
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
res = self.client.post("/suppliers/frameworks/g-cloud-12/sign-framework-agreement", data={})
assert res.status_code == 400
text = res.get_data(as_text=True)
assert 'Enter your full name.' in text
assert 'Enter your role in the company.' in text
assert 'Accept the terms and conditions of the Framework Agreement.' in text
def test_post_signs_agreement(self):
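        # Signing should drive three API calls: create the agreement, store the signer details, then mark the agreement as signed.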
self.data_api_client.create_framework_agreement.return_value = {"agreement": {"id": 789}}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
assert res.status_code == 200
res = self.client.post("/suppliers/frameworks/g-cloud-12/sign-framework-agreement",
data={"signerName": "Jane Doe",
"signerRole": "Director",
"signer_terms_and_conditions": "True"})
self.data_api_client.create_framework_agreement.assert_called_once_with(1234, 'g-cloud-12', 'email@email.com')
self.data_api_client.update_framework_agreement.assert_called_once_with(789, {
"signedAgreementDetails": {"signerName": "Jane Doe",
"signerRole": "Director"}},
"email@email.com")
self.data_api_client.sign_framework_agreement.assert_called_once_with(
789,
'email@email.com',
{'uploaderUserId': 123}
)
assert res.status_code == 200
doc = html.fromstring(res.get_data(as_text=True))
assert doc.xpath("//h1")[0].text_content().strip() == "You’ve signed the G-Cloud 12 Framework Agreement"
@mock.patch('app.main.views.frameworks.DMNotifyClient', autospec=True)
def test_sign_framework_agreement_sends_notify_emails(self, mock_dmnotifyclient_class):
mock_dmnotifyclient_instance = mock_dmnotifyclient_class.return_value
self.data_api_client.find_users_iter.return_value = [
{'emailAddress': 'email1', 'active': True},
{'emailAddress': 'email2', 'active': True},
{'emailAddress': 'email3', 'active': False}
]
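        # Only the two active users should receive the confirmation email; the inactive user is skipped.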
self.data_api_client.create_framework_agreement.return_value = {"agreement": {"id": 789}}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(
on_framework=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.login()
self.client.post("/suppliers/frameworks/g-cloud-12/sign-framework-agreement",
data={"signerName": "Jane Doe",
"signerRole": "Director",
"signer_terms_and_conditions": "True"})
assert mock_dmnotifyclient_instance.send_email.call_count == 2
assert (mock_dmnotifyclient_instance.send_email.call_args[1].get('template_name_or_id') ==
'sign_framework_agreement_confirmation')
def test_agreement_text_contains_supplier_details(self):
self.data_api_client.get_framework.return_value = self.framework(status='standstill',
slug='g-cloud-12',
framework_agreement_version="1",
is_e_signature_supported=True)
self.data_api_client.find_draft_services_by_framework.return_value = {
'meta': {'total': 1}
}
self.data_api_client.get_supplier_framework_info.return_value = self.supplier_framework(on_framework=True)
self.data_api_client.get_supplier.return_value = {'suppliers': {'registeredName': 'Acme Company',
'companiesHouseNumber': '87654321',
'contactInformation':
[{'address1': '10 Downing Street',
'city': 'London',
'postcode': 'SW1A 2AA'
}]}}
self.login()
res = self.client.get("/suppliers/frameworks/g-cloud-12/sign-framework-agreement")
text = res.get_data(as_text=True)
assert "Lot 1: Cloud hosting, Lot 2: Cloud software, Lot 3: Cloud support" in text
assert "Acme Company" in text
assert "87654321" in text
assert "10 Downing Street, London, SW1A 2AA" in text
| 45.098202 | 168 | 0.598616 |
afae8ddfc5d7239f7e7f011a3683064873f351c5 | 224 | py | Python | Exercises/W07D05_Exercise_01_Django/polls/urls.py | Roger-Takeshita/Software_Engineer | ec647bb969aa02453dae1884b5787d2045f7b4e2 | [
"MIT"
] | 2 | 2019-12-27T06:15:26.000Z | 2020-05-21T17:37:12.000Z | Exercises/W07D05_Exercise_01_Django/polls/urls.py | Roger-Takeshita/Bootcamp-Software-Engineer | ec647bb969aa02453dae1884b5787d2045f7b4e2 | [
"MIT"
] | null | null | null | Exercises/W07D05_Exercise_01_Django/polls/urls.py | Roger-Takeshita/Bootcamp-Software-Engineer | ec647bb969aa02453dae1884b5787d2045f7b4e2 | [
"MIT"
] | null | null | null |
from django.urls import path
from . import views  # import the views module from this app package
urlpatterns = [
    path('', views.index, name='index')  # the route is named 'index' so it can be reversed elsewhere
]
| 37.333333 | 93 | 0.683036 |