hexsha (string, 40) | size (int64, 2..1.05M) | ext (string, 9 classes) | lang (string, 1 class) | max_stars_repo_path (string, 4..193) | max_stars_repo_name (string, 6..109) | max_stars_repo_head_hexsha (string, 40..78) | max_stars_repo_licenses (sequence) | max_stars_count (int64, 1..36.6k, nullable) | max_stars_repo_stars_event_min_datetime (string, 24, nullable) | max_stars_repo_stars_event_max_datetime (string, 24, nullable) | max_issues_repo_path (string, 4..193) | max_issues_repo_name (string, 6..109) | max_issues_repo_head_hexsha (string, 40..78) | max_issues_repo_licenses (sequence) | max_issues_count (int64, 1..29.8k, nullable) | max_issues_repo_issues_event_min_datetime (string, 24, nullable) | max_issues_repo_issues_event_max_datetime (string, 24, nullable) | max_forks_repo_path (string, 4..193) | max_forks_repo_name (string, 6..109) | max_forks_repo_head_hexsha (string, 40..78) | max_forks_repo_licenses (sequence) | max_forks_count (int64, 1..11.2k, nullable) | max_forks_repo_forks_event_min_datetime (string, 24, nullable) | max_forks_repo_forks_event_max_datetime (string, 24, nullable) | content (string, 2..1.05M) | avg_line_length (float64, 1..404k) | max_line_length (int64, 1..1.03M) | alphanum_fraction (float64, 0..1)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7153d3e5f303fac4afc1dc66b303035bd382d50 | 969 | py | Python | doc/api/epydoc/build.py | swamper123/pymodbus | 7dfac6f19c60d3aa50a168ff82db88204dfb3a30 | [
"BSD-3-Clause"
] | null | null | null | doc/api/epydoc/build.py | swamper123/pymodbus | 7dfac6f19c60d3aa50a168ff82db88204dfb3a30 | [
"BSD-3-Clause"
] | 1 | 2020-10-29T12:01:38.000Z | 2022-03-21T02:39:59.000Z | doc/api/epydoc/build.py | swamper123/pymodbus | 7dfac6f19c60d3aa50a168ff82db88204dfb3a30 | [
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
'''
Epydoc API Runner
------------------
Using pkg_resources, we attempt to see if epydoc is installed,
if so, we use its cli program to compile the documents
'''
try:
import sys, os, shutil
import pkg_resources
pkg_resources.require("epydoc")
from epydoc.cli import cli
sys.argv = '''epydoc.py pymodbus
--html --simple-term --quiet
--include-log
--graph=all
--docformat=plaintext
--debug
--exclude=._
--exclude=tests
--output=html/
'''.split()
#bugs in trunk for --docformat=restructuredtext
if not os.path.exists("./html"):
os.mkdir("./html")
print( "Building Epydoc API Documentation")
cli()
if os.path.exists('../../../build'):
shutil.move("html", "../../../build/epydoc")
except Exception as ex:
import traceback,sys
traceback.print_exc(file=sys.stdout)
print( "Epydoc not avaliable...not building")
| 24.846154 | 62 | 0.603715 |
f71573d18019e66119ed0720c4b4edddc4c1a5eb | 987 | py | Python | atom/nucleus/python/test/test_order_reconcile_return_object.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_order_reconcile_return_object.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_order_reconcile_return_object.py | AbhiGupta03/SDK | f3a61aae7a847f07f0c22a154ca88dc378e9d25e | [
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.order_reconcile_return_object import OrderReconcileReturnObject # noqa: E501
from nucleus_api.rest import ApiException
class TestOrderReconcileReturnObject(unittest.TestCase):
"""OrderReconcileReturnObject unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOrderReconcileReturnObject(self):
"""Test OrderReconcileReturnObject"""
# FIXME: construct object with mandatory attributes with example values
# model = nucleus_api.models.order_reconcile_return_object.OrderReconcileReturnObject() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 24.073171 | 109 | 0.733536 |
f715b38795d175d33576ab05dea9a3fe41688f13 | 1,863 | py | Python | samples/snippets/detect/label-products.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | [
"Apache-2.0"
] | null | null | null | samples/snippets/detect/label-products.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | [
"Apache-2.0"
] | null | null | null | samples/snippets/detect/label-products.py | glaswasser/python-vision | 706c314a86b8f35c313bb3e907ae84317dca1a0b | [
"Apache-2.0"
] | null | null | null |
from detect import (detect_logos, detect_text)
import pandas as pd
import re
import os
#from __future__ import print_function
from google.cloud import vision
images_path = "C:\\Users\\heinz\\Yagora GmbH\\Ievgen Kyrda - Crawler\\images\\foodnewsgermany_images/"
file_names = os.listdir(os.path.dirname(images_path))
file_paths = [images_path + f for f in file_names]
logos = [detect_logos(f) for f in file_paths]
texts = [detect_text(f)[0].description for f in file_paths]
# remove line break symbols
texts = [x.replace("\n", ", ") for x in texts]
contained = []
#contained[1] = "test"
for i in range(len(logos)): # loop over future rows of df
tmp = []
for j in logos[i]: # for every logo-row, check if in text
if j.lower() in texts[i].lower():
tmp.append(logos[i])
else:
tmp.append(None)
contained.append(tmp)
detect_df = pd.DataFrame(
list(zip(file_names, texts, logos, contained, file_paths)),
columns = ["files", "texts", "logos", "probable_brand", "file_path"]
)
detect_df
# other ideas:
# if logo in existing logos, add logo
from PIL import Image
from io import BytesIO
from IPython.display import HTML
import base64
pd.set_option('display.max_colwidth', -1)
def get_thumbnail(path):
i = Image.open(path)
i.thumbnail((150, 150), Image.LANCZOS)
return i
def image_base64(im):
if isinstance(im, str):
im = get_thumbnail(im)
with BytesIO() as buffer:
im.save(buffer, 'jpeg')
return base64.b64encode(buffer.getvalue()).decode()
def image_formatter(im):
return f'<img src="data:image/jpeg;base64,{image_base64(im)}">'
#dogs['file'] = dogs.id.map(lambda id: f'../input/train/{id}.jpg')
detect_df['image'] = detect_df.file_path.map(lambda f: get_thumbnail(f))
HTML(detect_df.to_html(formatters={'image': image_formatter}, escape=False))
| 26.239437 | 102 | 0.688137 |
f715e2b4af325720c565d744e3e3558d6ec968b2 | 11,243 | py | Python | bookworm/annotation/annotation_gui.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 18 | 2019-07-19T22:12:15.000Z | 2020-08-26T17:45:19.000Z | bookworm/annotation/annotation_gui.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 44 | 2019-07-15T10:17:00.000Z | 2020-07-26T11:22:53.000Z | bookworm/annotation/annotation_gui.py | mush42/bookworm | a4bdd89363137a89a1bed1e9e072de4fb55576fd | [
"MIT"
] | 9 | 2019-09-03T13:13:31.000Z | 2020-08-25T13:55:27.000Z |
# coding: utf-8
import wx
from enum import IntEnum
from bookworm import speech
from bookworm.gui.settings import SettingsPanel
from bookworm.structured_text import TextRange
from bookworm.logger import logger
from .annotator import Bookmarker, NoteTaker, Quoter
from .annotation_dialogs import (
BookmarksViewer,
CommentsDialog,
QuotesDialog,
GenericAnnotationWithContentDialog,
)
log = logger.getChild(__name__)
class AnnotationSettingsPanel(SettingsPanel):
config_section = "annotation"
def addControls(self):
# Translators: the title of a group of controls in the
UIBox = self.make_static_box(_("Annotation"))
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Speak the bookmark when jumping"),
name="annotation.speak_bookmarks_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Select the bookmarked line when jumping"),
name="annotation.select_bookmarked_line_on_jumping",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use visual styles to indicate annotations"),
name="annotation.use_visuals",
)
wx.CheckBox(
UIBox,
-1,
# Translators: the label of a checkbox
_("Use sounds to indicate the presence of comments"),
name="annotation.play_sound_for_comments",
)
class AnnotationsMenuIds(IntEnum):
addBookmark = 241
addNamedBookmark = 242
addNote = 243
quoteSelection = 244
viewBookmarks = 245
class StatelessAnnotationsMenuIds(IntEnum):
viewNotes = 246
viewQuotes = 247
ANNOTATIONS_KEYBOARD_SHORTCUTS = {
AnnotationsMenuIds.addBookmark: "Ctrl-B",
AnnotationsMenuIds.addNamedBookmark: "Ctrl-Shift-B",
AnnotationsMenuIds.addNote: "Ctrl-M",
AnnotationsMenuIds.quoteSelection: "Ctrl-H",
}
class AnnotationMenu(wx.Menu):
"""Annotation menu."""
def __init__(self, service):
super().__init__()
self.service = service
self.view = service.view
self.reader = service.reader
# Add menu items
self.Append(
AnnotationsMenuIds.addBookmark,
# Translators: the label of an item in the application menubar
_("Add &Bookmark\tCtrl-B"),
# Translators: the help text of an item in the application menubar
_("Add a bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNamedBookmark,
# Translators: the label of an item in the application menubar
_("Add &Named Bookmark...\tCtrl-Shift-B"),
# Translators: the help text of an item in the application menubar
_("Add a named bookmark at the current position"),
)
self.Append(
AnnotationsMenuIds.addNote,
# Translators: the label of an item in the application menubar
_("Add Co&mment...\tCtrl-M"),
# Translators: the help text of an item in the application menubar
_("Add a comment at the current position"),
)
self.Append(
AnnotationsMenuIds.quoteSelection,
# Translators: the label of an item in the application menubar
_("&Highlight Selection\tCtrl-H"),
# Translators: the help text of an item in the application menubar
_("Highlight selected text and save it."),
)
self.Append(
AnnotationsMenuIds.viewBookmarks,
# Translators: the label of an item in the application menubar
_("Saved &Bookmarks..."),
# Translators: the help text of an item in the application menubar
_("View added bookmarks"),
)
self.Append(
StatelessAnnotationsMenuIds.viewNotes,
# Translators: the label of an item in the application menubar
_("Saved Co&mments..."),
# Translators: the help text of an item in the application menubar
_("View, edit, and remove comments."),
)
self.Append(
StatelessAnnotationsMenuIds.viewQuotes,
# Translators: the label of an item in the application menubar
_("Saved &Highlights..."),
# Translators: the help text of an item in the application menubar
_("View saved highlights."),
)
# Translators: the label of an item in the application menubar
# EventHandlers
self.view.Bind(
wx.EVT_MENU, self.onAddBookmark, id=AnnotationsMenuIds.addBookmark
)
self.view.Bind(
wx.EVT_MENU, self.onAddNamedBookmark, id=AnnotationsMenuIds.addNamedBookmark
)
self.view.Bind(wx.EVT_MENU, self.onAddNote, id=AnnotationsMenuIds.addNote)
self.view.Bind(
wx.EVT_MENU, self.onQuoteSelection, id=AnnotationsMenuIds.quoteSelection
)
self.view.Bind(
wx.EVT_MENU, self.onViewBookmarks, id=AnnotationsMenuIds.viewBookmarks
)
self.view.Bind(
wx.EVT_MENU, self.onViewNotes, id=StatelessAnnotationsMenuIds.viewNotes
)
self.view.Bind(
wx.EVT_MENU, self.onViewQuotes, id=StatelessAnnotationsMenuIds.viewQuotes
)
def _add_bookmark(self, name=""):
bookmarker = Bookmarker(self.reader)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
__, __, current_lino = self.view.contentTextCtrl.PositionToXY(insertionPoint)
count = 0
for bkm in bookmarker.get_for_page(self.reader.current_page):
__, __, lino = self.view.contentTextCtrl.PositionToXY(bkm.position)
if lino == current_lino:
count += 1
bookmarker.delete(bkm.id)
self.service.style_bookmark(self.view, bkm.position, enable=False)
if count and not name:
return speech.announce(_("Bookmark removed"))
Bookmarker(self.reader).create(title=name, position=insertionPoint)
# Translators: spoken message
speech.announce(_("Bookmark Added"))
self.service.style_bookmark(self.view, insertionPoint)
def onAddBookmark(self, event):
self._add_bookmark()
def onAddNamedBookmark(self, event):
bookmark_name = self.view.get_text_from_user(
# Translators: title of a dialog
_("Add Named Bookmark"),
# Translators: label of a text entry
_("Bookmark name:"),
)
if bookmark_name:
self._add_bookmark(bookmark_name)
def onAddNote(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
insertionPoint = self.view.contentTextCtrl.GetInsertionPoint()
comment_text = self.view.get_text_from_user(
# Translators: the title of a dialog to add a comment
_("New Comment"),
# Translators: the label of an edit field to enter a comment
_("Comment:"),
style=wx.OK | wx.CANCEL | wx.TE_MULTILINE | wx.CENTER,
)
if not comment_text:
return
note = NoteTaker(self.reader).create(
title="", content=comment_text, position=insertionPoint
)
self.service.style_comment(self.view, insertionPoint)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Comment"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
note.tags.append(tag.strip())
NoteTaker.model.session.commit()
def onQuoteSelection(self, event):
_with_tags = wx.GetKeyState(wx.WXK_SHIFT)
quoter = Quoter(self.reader)
selected_text = self.view.contentTextCtrl.GetStringSelection()
if not selected_text:
return speech.announce(_("No selection"))
x, y = self.view.get_selection_range()
for q in quoter.get_for_page():
q_range = TextRange(q.start_pos, q.end_pos)
if (q_range.start == x) and (q_range.stop == y):
quoter.delete(q.id)
self.service.style_highlight(self.view, x, y, enable=False)
# Translators: spoken message
return speech.announce(_("Highlight removed"))
elif (q.start_pos < x) and (q.end_pos > y):
# Translators: spoken message
speech.announce(_("Already highlighted"))
return wx.Bell()
if (x in q_range) or (y in q_range):
if x not in q_range:
q.start_pos = x
q.session.commit()
self.service.style_highlight(self.view, x, q_range.stop)
return speech.announce(_("Highlight extended"))
elif y not in q_range:
q.end_pos = y
q.session.commit()
self.service.style_highlight(self.view, q_range.start, y)
# Translators: spoken message
return speech.announce(_("Highlight extended"))
quote = quoter.create(title="", content=selected_text, start_pos=x, end_pos=y)
# Translators: spoken message
speech.announce(_("Selection highlighted"))
self.service.style_highlight(self.view, x, y)
if _with_tags:
# add tags
tags_text = self.view.get_text_from_user(
# Translators: title of a dialog
_("Tag Highlight"),
# Translators: label of a text entry
_("Tags:"),
)
if tags_text:
for tag in tags_text.split():
quote.tags.append(tag.strip())
Quoter.model.session.commit()
def onViewBookmarks(self, event):
with BookmarksViewer(
parent=self.view,
reader=self.reader,
annotator=Bookmarker,
# Translators: the title of a dialog to view bookmarks
title=_("Bookmarks | {book}").format(book=self.reader.current_book.title),
) as dlg:
dlg.ShowModal()
def onViewNotes(self, event):
Dialog = (
CommentsDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Comments"),
reader=self.reader,
annotator_cls=NoteTaker,
can_edit=True,
) as dlg:
dlg.ShowModal()
def onViewQuotes(self, event):
Dialog = (
QuotesDialog if self.reader.ready else GenericAnnotationWithContentDialog
)
with Dialog(
parent=self.view,
title=_("Highlights"),
reader=self.reader,
annotator_cls=Quoter,
) as dlg:
dlg.ShowModal()
| 37.228477 | 88 | 0.592102 |
f71617895efc3dfd23246121c700461891099a24 | 6,196 | py | Python | docs/conf.py | EVEprosper/ProsperDatareader | 31f0d77074c21222161774f4d653326925611167 | [
"MIT"
] | null | null | null | docs/conf.py | EVEprosper/ProsperDatareader | 31f0d77074c21222161774f4d653326925611167 | [
"MIT"
] | 14 | 2017-08-14T02:25:42.000Z | 2018-11-16T19:15:52.000Z | docs/conf.py | EVEprosper/ProsperDatareader | 31f0d77074c21222161774f4d653326925611167 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# ProsperDatareader documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 31 09:30:33 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
## vv TODO vv: autodocs ##
import os
import sys
sys.path.insert(0, os.path.abspath('../prosper/datareader'))
sys.path.insert(0, os.path.abspath('../prosper'))
from _version import __version__
## ^^ TODO ^^ ##
import alabaster
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.napoleon',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinxcontrib.napoleon',
'alabaster',
]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'ProsperDatareader'
copyright = '2017, John Purcell'
author = 'John Purcell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.9.0'
# The full version, including alpha/beta/rc tags.
release = '0.9.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_static_path = ['_static']
templates_path = ['templates']
html_show_sourcelink = False
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo': 'logo-colour-sm.png',
'description': 'Uniform Data Collection',
'description_font_style': 'italic',
'github_user': 'eveprosper',
'github_repo': 'prosperdatareader',
'github_banner': True,
}
html_favicon = "static/prosper.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'index': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html',
],
'**': [
'about.html', 'patreon.html', 'globaltoc.html', 'searchbox.html'
]
}
#html_sidebars = {
# '**': [
# 'about.html',
# 'navigation.html',
# 'relations.html', # needs 'show_related': True theme option to display
# 'searchbox.html',
# 'donate.html',
# ]
#}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'ProsperDatareaderdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'ProsperDatareader.tex', 'ProsperDatareader Documentation',
'John Purcell', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'prosperdatareader', 'ProsperDatareader Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'ProsperDatareader', 'ProsperDatareader Documentation',
author, 'ProsperDatareader', 'One line description of project.',
'Miscellaneous'),
]
| 29.788462 | 80 | 0.675274 |
f71669f5529851928377789218c8de2abda6eaf6 | 5,513 | py | Python | 测试/tensorflow_hello/2.practices_on_nlp.py | shayxu-ai/A-Repository-for-Machine-Learning | 4b4cea15bb005d1c58f4395fde97cadf44fb0186 | [
"Apache-2.0"
] | null | null | null | 测试/tensorflow_hello/2.practices_on_nlp.py | shayxu-ai/A-Repository-for-Machine-Learning | 4b4cea15bb005d1c58f4395fde97cadf44fb0186 | [
"Apache-2.0"
] | null | null | null | 测试/tensorflow_hello/2.practices_on_nlp.py | shayxu-ai/A-Repository-for-Machine-Learning | 4b4cea15bb005d1c58f4395fde97cadf44fb0186 | [
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time: 2020/2/5,005 22:02
# @Last Update: 2020/2/5,005 22:02
# @Author: 徐缘
# @FileName: 2.practices_on_nlp.py
# @Software: PyCharm
from __future__ import absolute_import, division, print_function, unicode_literals # 导入一些熟悉的陌生人
# 绝对引入,精确除法,print,unicode类型字符串。都是为了适配python2,不加也罢
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
from tensorflow import keras
import tensorflow_hub as hub # 模型库
import tensorflow_datasets as tfds # 数据|库 https://tensorflow.google.cn/datasets/api_docs/python/tfds?hl=en
tfds.disable_progress_bar()
def version():
"""
    As per convention, check the versions first.
"""
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
print("tfds version", tfds.__version__)
print("GPU is", "available" if tf.config.experimental.list_physical_devices("GPU") else "NOT AVAILABLE")
def tf_hub_hello():
"""
    Pretrained word2vec embeddings (transfer learning) + a fully connected layer
    loss: 0.329
    accuracy: 0.858 -- I recall CNN text classification can reach 95%
"""
train_data, validation_data, test_data = tfds.load(
name="imdb_reviews", split=('train[:60%]', 'train[60%:]', 'test'),
as_supervised=True)
train_examples_batch, train_labels_batch = next(iter(train_data.batch(10)))
print(train_examples_batch)
print(train_labels_batch)
embedding = "https://hub.tensorflow.google.cn/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(embedding, input_shape=[],
dtype=tf.string, trainable=True)
print(hub_layer(train_examples_batch[:3]))
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(16, activation='relu'))
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
# model.summary()
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(train_data.shuffle(10000).batch(512),
epochs=20,
validation_data=validation_data.batch(512),
verbose=1)
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
print("%s: %.3f" % (name, value))
def preprocess_text():
"""
"""
(train_data, test_data), info = tfds.load(
# Use the version pre-encoded with an ~8k vocabulary.
'imdb_reviews/subwords8k',
# Return the train/test datasets as a tuple.
split=(tfds.Split.TRAIN, tfds.Split.TEST),
# Return (example, label) pairs from the dataset (instead of a dictionary).
as_supervised=True,
# Also return the `info` structure.
with_info=True)
encoder = info.features['text'].encoder
print('Vocabulary size: {}'.format(encoder.vocab_size))
sample_string = 'Hello TensorFlow.'
encoded_string = encoder.encode(sample_string)
print('Encoded string is {}'.format(encoded_string))
original_string = encoder.decode(encoded_string)
print('The original string: "{}"'.format(original_string))
assert original_string == sample_string
for ts in encoded_string:
print('{} ----> {}'.format(ts, encoder.decode([ts])))
for train_example, train_label in train_data.take(1):
print('Encoded text:', train_example[:10].numpy())
print('Label:', train_label.numpy())
encoder.decode(train_example)
BUFFER_SIZE = 1000
train_batches = (
train_data
.shuffle(BUFFER_SIZE)
.padded_batch(32, train_data.output_shapes))
test_batches = (
test_data
.padded_batch(32, train_data.output_shapes))
for example_batch, label_batch in train_batches.take(2):
print("Batch shape:", example_batch.shape)
print("label shape:", label_batch.shape)
model = keras.Sequential([
keras.layers.Embedding(encoder.vocab_size, 16),
keras.layers.GlobalAveragePooling1D(),
keras.layers.Dense(1, activation='sigmoid')])
model.summary()
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(train_batches,
epochs=10,
validation_data=test_batches,
validation_steps=30)
loss, accuracy = model.evaluate(test_batches)
print("Loss: ", loss)
print("Accuracy: ", accuracy)
history_dict = history.history
history_dict.keys()
import matplotlib.pyplot as plt
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'bo', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.clf() # clear figure
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.show()
return
if __name__ == '__main__':
# version()
preprocess_text()
| 29.169312 | 108 | 0.643751 |
f71684275afc20018793ca67d49712a2693a0850 | 10,212 | py | Python | a3c_master_sewak.py | sebtac/MLxE | 93baa6b7c9fd14e54abd7199e868fb828e9a7c52 | [
"Apache-2.0"
] | 1 | 2020-12-15T17:19:33.000Z | 2020-12-15T17:19:33.000Z | a3c_master_sewak.py | sebtac/MLxE | 93baa6b7c9fd14e54abd7199e868fb828e9a7c52 | [
"Apache-2.0"
] | null | null | null | a3c_master_sewak.py | sebtac/MLxE | 93baa6b7c9fd14e54abd7199e868fb828e9a7c52 | [
"Apache-2.0"
] | null | null | null |
""" A3C in Code - Centralized/ Global Network Parameter Server/ Controller
Based On:
A3C Code as in the book Deep Reinforcement Learning, Chapter 12.
Runtime: Python 3.6.5
Dependencies: numpy, matplotlib, tensorflow (/ tensorflow-gpu), gym
DocStrings: GoogleStyle
Author : Mohit Sewak (p20150023@goa-bits-pilani.ac.in)
Inspired from: A3C implementation on TensorFLow official github repository (Tensorflow/models/research)
**********************************************************************
Adjusted by Sebastian Taciak as part of development of MLxE Architecture
@author: sebtac
@contact: https://www.linkedin.com/in/sebastian-taciak-5893861/
"""
# SET BEFORE RUNNING
# AGENT TYPE
# 0 - Sewak Base Agent (Fixed)
# 1 - Sewak DNN Adjusted
# 2 - Sewak "Task" Modified
# 3 - Sewak ISTB (Iterative, Synchronous Thread Based)
Agent_Type = 3
learning_rate = 0.0001
import multiprocessing
cores = multiprocessing.cpu_count() # DEFAULT SETTING
#cores = 1 # FOR DEBUGGING
# GENERAL IMPORTS
import sys
sys.path.append(r'C:\Users\surface\Documents\Python\RL\MLxE\Mohit Sewak RL\Mohit12_A3C')
import time
import winsound
import logging
import os
import numpy as np
import matplotlib.pyplot as plt
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# DEEP LEARING and ENVIRONEMENT RELATER IMPORTS
import tensorflow as tf
import tensorflow_addons as tfa # ST for DNN Adjustment
import gym
# CUSTOM SEWAK's MODULES with OPTIONAL SEBTAC ADJUSTMENTS
from experience_replay_sewak import SimpleListBasedMemory
if Agent_Type == 0:
from actorcritic_model_sewak import ActorCriticModel as ACModel # For Sewak Fixed version
from a3c_worker_sewak_base import A3C_Worker # the intial Sewak's implementation with fixes of the Policy_Loss Calcultion
elif Agent_Type == 1:
from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel
from a3c_worker_sewak_DNN_Adjusted import A3C_Worker
elif Agent_Type == 2:
from actorcritic_model_sewak import ActorCriticModel_Dimond as ACModel
from a3c_worker_sewak_Task_Modifications import A3C_Worker
elif Agent_Type == 3:
from actorcritic_model_sewak import ActorCriticModel_DoubleDimond as ACModel
from a3c_worker_sewak_ISTB import A3C_Worker
# SEWAK's Implementation Fix
"""
- Policy Loss Calcualtion
- Using actual play in example generation (was random)
"""
# DNN Adjustments
"""
- Adding monotonic decrease in Learning Rate relative to the number of episodes run with:
self.alpha_power = 0.998
self.alpha_limit = 0.000001
- Modifying the Model to: common_network_size=[128,256,128], policy_network_size=[64,128,64], value_network_size=[64,128,64]
- Changing the Optimizer to RectifiedAdam -- requires tensorflow_addons
- Changing Gamma coefficient to 0.97
"""
# Task Specific Modifications
"""
- Modified state representation with addition of 5th parameter representing the squared distance of the cart from the center of the plane
- Adverse Initial Position
- Negative Reward: -10.0 (originally 0.0)
- Monotonically Decreasing Discount Factor (Gamma Coefficient)
- Goal Specific Reward for cart being close to center of the plane and the pole being close to vertical
"""
class A3C_Master():
"""A3C Master
Centralized Master class of A3C used for hosting the global network parameters and spawning the agents.
Args:
env_name (str): Name of a valid gym environment
model_dir (str): Directory for saving the model during training, and loading the same while playing
learning_rate (float): The learning rate (alpha) for the optimizer
Examples:
agent = A3C_Master()
        agent.train(cores)
agent.play()
"""
def __init__(self, Agent_Type=Agent_Type, env_name='CartPole-v0', model_dir="models", learning_rate=learning_rate): #ST 0.001 for Fixed, 0.0001 otherwise
self.env_name = env_name
self.model_dir = model_dir
self.alpha = learning_rate
if not os.path.exists(model_dir):
os.makedirs(model_dir)
self.env = gym.make(self.env_name)
self.action_size = self.env.action_space.n
if Agent_Type <= 1:
self.state_size = self.env.observation_space.shape[0] # For None TaH imlementations
elif Agent_Type == 2:
self.state_size = self.env.observation_space.shape[0] + 1 # ST for TaH implementation
elif Agent_Type == 3:
self.state_size = self.env.observation_space.shape[0] + 1 # ST for TaH implementation
if Agent_Type == 0:
self.optimizer = tf.keras.optimizers.Adam(self.alpha)
else:
self.optimizer = tfa.optimizers.RectifiedAdam(self.alpha) # ST DNN Adjustment
logger.debug("StateSize:{}, ActionSize:{}".format(self.state_size, self.action_size))
self.master_model = ACModel(self.action_size)
self.master_model(tf.convert_to_tensor(np.random.random((1, self.state_size)), dtype=tf.float32))
def train(self, cores):
"""Train the A3C agent
Main function to train the A3C agent after instantiation.
This method uses the number of processor cores to spawns as many Workers. The workers are spawned as
multiple parallel threads instead of multiple parallel processes. Being a threaded execution, the workers
share memory and hence can write directly into the shared global variables.
A more optimal, completely asynchronous implementation could be to spawn the workers as different processes
using a task queue or multiprocessing. In case if this is adopted, then the shared variables need to made
accessible in the distributed environment.
"""
a3c_workers = [A3C_Worker(self.master_model,
self.optimizer,
i,
self.env_name,
self.model_dir,
workers_num = cores,
learning_rate = learning_rate)
for i in range(cores)]
for i, worker in enumerate(a3c_workers):
logger.info("Starting worker {}".format(i))
worker.start()
[worker.join() for worker in a3c_workers]
self.plot_training_statistics()
def play(self):
"""Play the environment using a trained agent
This function opens a (graphical) window that will play a trained agent. The function will try to retrieve
the model saved in the model_dir with filename formatted to contain the associated env_name.
If the model is not found, then the function will first call the train function to start the training.
"""
env = self.env.unwrapped
state = env.reset()
model = self.master_model
model_path = os.path.join(self.model_dir, 'model_{}.h5'.format(self.env_name))
if not os.path.exists(model_path):
logger.info('A3CMaster: No model found at {}, starting fresh training before playing!'.format(model_path))
self.train()
logger.info('A3CMaster: Playing env, Loading model from: {}'.format(model_path))
print("Model Path:", model_path)
#model.load_weights(model_path)
done = False
step_counter = 0
reward_sum = 0
try:
while not done:
env.render(mode='rgb_array')
policy, value = model(tf.convert_to_tensor(state[None, :], dtype=tf.float32))
policy = tf.nn.softmax(policy)
action = np.argmax(policy)
state, reward, done, _ = env.step(action)
reward_sum += reward
logger.info("{}. Reward: {}, action: {}".format(step_counter, reward_sum, action))
step_counter += 1
except KeyboardInterrupt:
print("Received Keyboard Interrupt. Shutting down.")
finally:
env.close()
def plot_training_statistics(self, training_statistics=None):
"""Plot training statistics
This function plot the training statistics like the steps, rewards, discounted_rewards, and loss in each
of the training episode.
"""
training_statistics = A3C_Worker.global_shared_training_stats if training_statistics is None \
else training_statistics
all_episodes = []
all_steps = []
all_rewards = []
all_discounted_rewards = []
all_losses = []
for stats in training_statistics:
worker, episode, steps, reward, discounted_rewards, loss = stats
all_episodes.append(episode)
all_steps.append(steps)
all_rewards.append(reward)
all_discounted_rewards.append(discounted_rewards)
all_losses.append(loss)
self._make_double_axis_plot(all_episodes, all_steps, all_rewards)
self._make_double_axis_plot(all_episodes,all_discounted_rewards,all_losses, label_y1="Discounted Reward",
label_y2="Loss", color_y1="cyan", color_y2="black")
np.savetxt('run.csv', all_steps, delimiter=',', fmt='%d')
@staticmethod
def _make_double_axis_plot(data_x, data_y1, data_y2, x_label='Episodes (e)', label_y1='Steps To Episode Completion',
label_y2='Reward in each Episode', color_y1="red", color_y2="blue"):
"""Internal helper function for plotting dual axis plots
"""
fig, ax1 = plt.subplots()
ax1.set_xlabel(x_label)
ax1.set_ylabel(label_y1, color=color_y1)
ax1.plot(data_x, data_y1, color=color_y1)
ax2 = ax1.twinx()
ax2.set_ylabel(label_y2, color=color_y2)
ax2.plot(data_x, data_y2, color=color_y2)
fig.tight_layout()
plt.show()
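# A3C_Master.train() above notes that a fully asynchronous variant would spawn workers
# as separate processes around a queue instead of shared-memory threads. This is a
# minimal, self-contained sketch of that idea only (hypothetical names, no model code),
# not part of the original implementation.
def _sketch_worker_process(worker_id, env_name, result_queue):
    # A real worker would build its own environment and local model here and exchange
    # parameters/gradients with the master through queues or a parameter server.
    result_queue.put((worker_id, env_name))
def _sketch_process_based_training(env_name, worker_count):
    queue = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=_sketch_worker_process, args=(i, env_name, queue))
             for i in range(worker_count)]
    for p in procs:
        p.start()
    results = [queue.get() for _ in range(worker_count)]
    for p in procs:
        p.join()
    return results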
if __name__ == "__main__":
"""Main function for testing the A3C Master code's implementation
"""
agent = A3C_Master(Agent_Type=Agent_Type)
agent.train(cores)
#agent.play()
for i in range(10):
winsound.Beep(500,500)
| 39.890625 | 158 | 0.665785 |
f7168f6333b407972ae83a3eb73553f078b2cd44 | 7,304 | py | Python | src/sage/combinat/kazhdan_lusztig.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/kazhdan_lusztig.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | [
"BSL-1.0"
] | null | null | null | src/sage/combinat/kazhdan_lusztig.py | saraedum/sage-renamed | d2da67b14da2ad766a5906425d60d43a3b3e1270 | [
"BSL-1.0"
] | null | null | null |
r"""
Kazhdan-Lusztig Polynomials
AUTHORS:
- Daniel Bump (2008): initial version
- Alan J.X. Guo (2014-03-18): ``R_tilde()`` method.
"""
#*****************************************************************************
# Copyright (C) 2008 Daniel Bump <bump at match.stanford.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import, print_function, division
from sage.rings.polynomial.polynomial_element import is_Polynomial
from sage.misc.cachefunc import cached_method
from sage.rings.polynomial.laurent_polynomial import LaurentPolynomial
from sage.structure.sage_object import SageObject
from sage.structure.unique_representation import UniqueRepresentation
class KazhdanLusztigPolynomial(UniqueRepresentation, SageObject):
"""
A Kazhdan-Lusztig polynomial.
INPUT:
- ``W`` -- a Weyl Group
- ``q`` -- an indeterminate
OPTIONAL:
- ``trace`` -- if ``True``, then this displays the trace: the intermediate
results. This is instructive and fun.
The parent of ``q`` may be a :class:`PolynomialRing` or a
:class:`LaurentPolynomialRing`.
REFERENCES:
.. [KL79] \D. Kazhdan and G. Lusztig. *Representations of Coxeter
groups and Hecke algebras*. Invent. Math. **53** (1979).
no. 2, 165--184. :doi:`10.1007/BF01390031` :mathscinet:`MR0560412`
.. [Dy93] \M. J. Dyer. *Hecke algebras and shellings of Bruhat
intervals*. Compositio Mathematica, 1993, 89(1): 91-115.
.. [BB05] \A. Bjorner, F. Brenti. *Combinatorics of Coxeter
groups*. New York: Springer, 2005.
EXAMPLES::
sage: W = WeylGroup("B3",prefix="s")
sage: [s1,s2,s3] = W.simple_reflections()
sage: R.<q> = LaurentPolynomialRing(QQ)
sage: KL = KazhdanLusztigPolynomial(W,q)
sage: KL.P(s2,s3*s2*s3*s1*s2)
1 + q
A faster implementation (using the optional package Coxeter 3) is given by::
sage: W = CoxeterGroup(['B', 3], implementation='coxeter3') # optional - coxeter3
sage: W.kazhdan_lusztig_polynomial([2], [3,2,3,1,2]) # optional - coxeter3
q + 1
"""
def __init__(self, W, q, trace=False):
"""
Initialize ``self``.
EXAMPLES::
sage: W = WeylGroup("B3",prefix="s")
sage: R.<q> = LaurentPolynomialRing(QQ)
sage: KL = KazhdanLusztigPolynomial(W,q)
sage: TestSuite(KL).run()
"""
self._coxeter_group = W
self._q = q
self._trace = trace
self._one = W.one()
self._base_ring = q.parent()
if is_Polynomial(q):
self._base_ring_type = "polynomial"
elif isinstance(q, LaurentPolynomial):
self._base_ring_type = "laurent"
else:
self._base_ring_type = "unknown"
@cached_method
def R(self, x, y):
"""
Return the Kazhdan-Lusztig `R` polynomial.
INPUT:
- ``x``, ``y`` -- elements of the underlying Coxeter group
EXAMPLES::
sage: R.<q>=QQ[]
sage: W = WeylGroup("A2", prefix="s")
sage: [s1,s2]=W.simple_reflections()
sage: KL = KazhdanLusztigPolynomial(W, q)
sage: [KL.R(x,s2*s1) for x in [1,s1,s2,s1*s2]]
[q^2 - 2*q + 1, q - 1, q - 1, 0]
"""
if x == 1:
x = self._one
if y == 1:
y = self._one
if x == y:
return self._base_ring.one()
if not x.bruhat_le(y):
return self._base_ring.zero()
if y.length() == 0:
if x.length() == 0:
return self._base_ring.one()
else:
return self._base_ring.zero()
s = self._coxeter_group.simple_reflection(y.first_descent(side="left"))
if (s*x).length() < x.length():
ret = self.R(s*x,s*y)
if self._trace:
print(" R(%s,%s)=%s" % (x, y, ret))
return ret
else:
ret = (self._q-1)*self.R(s*x,y)+self._q*self.R(s*x,s*y)
if self._trace:
print(" R(%s,%s)=%s" % (x, y, ret))
return ret
@cached_method
def R_tilde(self, x, y):
r"""
Return the Kazhdan-Lusztig `\tilde{R}` polynomial.
Information about the `\tilde{R}` polynomials can be found in
[Dy93]_ and [BB05]_.
INPUT:
- ``x``, ``y`` -- elements of the underlying Coxeter group
EXAMPLES::
sage: R.<q> = QQ[]
sage: W = WeylGroup("A2", prefix="s")
sage: [s1,s2] = W.simple_reflections()
sage: KL = KazhdanLusztigPolynomial(W, q)
sage: [KL.R_tilde(x,s2*s1) for x in [1,s1,s2,s1*s2]]
[q^2, q, q, 0]
"""
if x == 1:
x = self._one
if y == 1:
y = self._one
if not x.bruhat_le(y):
return self._base_ring.zero()
if x == y:
return self._base_ring.one()
s = self._coxeter_group.simple_reflection(y.first_descent(side="right"))
if (x * s).length() < x.length():
ret = self.R_tilde(x * s, y * s)
if self._trace:
print(" R_tilde(%s,%s)=%s" % (x, y, ret))
return ret
else:
ret = self.R_tilde(x * s, y * s) + self._q * self.R_tilde(x, y * s)
if self._trace:
print(" R_tilde(%s,%s)=%s" % (x, y, ret))
return ret
@cached_method
def P(self, x, y):
"""
Return the Kazhdan-Lusztig `P` polynomial.
If the rank is large, this runs slowly at first but speeds up
as you do repeated calculations due to the caching.
INPUT:
- ``x``, ``y`` -- elements of the underlying Coxeter group
.. SEEALSO::
:mod:`~sage.libs.coxeter3.coxeter_group.CoxeterGroup.kazhdan_lusztig_polynomial`
for a faster implementation using Fokko Ducloux's Coxeter3 C++ library.
EXAMPLES::
sage: R.<q> = QQ[]
sage: W = WeylGroup("A3", prefix="s")
sage: [s1,s2,s3] = W.simple_reflections()
sage: KL = KazhdanLusztigPolynomial(W, q)
sage: KL.P(s2,s2*s1*s3*s2)
q + 1
"""
if x == 1:
x = self._one
if y == 1:
y = self._one
if x == y:
return self._base_ring.one()
if not x.bruhat_le(y):
return self._base_ring.zero()
if y.length() == 0:
if x.length() == 0:
return self._base_ring.one()
else:
return self._base_ring.zero()
p = sum(-self.R(x, t) * self.P(t, y)
for t in self._coxeter_group.bruhat_interval(x, y) if t != x)
tr = (y.length() - x.length() + 1) // 2
ret = p.truncate(tr)
if self._trace:
print(" P({},{})={}".format(x, y, ret))
return ret
| 32.035088 | 92 | 0.529299 |
f716cea90be05811860d24af0a9d540d7d2e2e6c | 4,451 | py | Python | code/distributeHI.py | modichirag/21cmhod | 0807a7b0b880f4ba5bc7161b843d500ddcece5a7 | [
"MIT"
] | null | null | null | code/distributeHI.py | modichirag/21cmhod | 0807a7b0b880f4ba5bc7161b843d500ddcece5a7 | [
"MIT"
] | null | null | null | code/distributeHI.py | modichirag/21cmhod | 0807a7b0b880f4ba5bc7161b843d500ddcece5a7 | [
"MIT"
] | null | null | null |
import numpy as np
import re, os
from pmesh.pm import ParticleMesh
from nbodykit.lab import BigFileCatalog, BigFileMesh, MultipleSpeciesCatalog, FFTPower
from nbodykit import setup_logging
from mpi4py import MPI
import HImodels
# enable logging, we have some clue what's going on.
setup_logging('info')
#Get model as parameter
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--size', help='for small or big box', default='small')
parser.add_argument('-m', '--model', help='model name to use')
args = parser.parse_args()
if args.model == None:
print('Specify a model name')
sys.exit()
#print(args, args.model)
model = args.model #'ModelD'
boxsize = args.size
#
#
#Global, fixed things
scratchyf = '/global/cscratch1/sd/yfeng1/m3127/'
scratchcm = '/global/cscratch1/sd/chmodi/m3127/H1mass/'
project = '/project/projectdirs/m3127/H1mass/'
cosmodef = {'omegam':0.309167, 'h':0.677, 'omegab':0.048}
alist = [0.1429,0.1538,0.1667,0.1818,0.2000,0.2222,0.2500,0.2857,0.3333]
#Parameters, box size, number of mesh cells, simulation, ...
if boxsize == 'small':
bs, nc, ncsim, sim, prefix = 256, 512, 2560, 'highres/%d-9100-fixed'%2560, 'highres'
elif boxsize == 'big':
bs, nc, ncsim, sim, prefix = 1024, 1024, 10240, 'highres/%d-9100-fixed'%10240, 'highres'
else:
print('Box size not understood, should be "big" or "small"')
sys.exit()
# It's useful to have my rank for printing...
pm = ParticleMesh(BoxSize=bs, Nmesh=[nc, nc, nc])
rank = pm.comm.rank
comm = pm.comm
#Which model & configuration to use
modeldict = {'ModelA':HImodels.ModelA, 'ModelB':HImodels.ModelB, 'ModelC':HImodels.ModelC}
modedict = {'ModelA':'galaxies', 'ModelB':'galaxies', 'ModelC':'halos'}
HImodel = modeldict[model] #HImodels.ModelB
modelname = model
mode = modedict[model]
ofolder = '../data/outputs/'
def distribution(aa, halocat, cencat, satcat, outfolder, mbins=None):
'''Compute the fraction of HI in halos, centrals, satellites'''
if rank==0: print('Calculating distribution')
if mbins is None: mbins = np.logspace(9, 15, 100)
hmass = halocat['Mass'].compute()
htotal, hsize, h1total = [], [], []
for im in range(mbins.size-1):
mask = (hmass >= mbins[im]) & (hmass < mbins[im+1])
rankweight = (hmass*mask).sum()
htotal.append(comm.allreduce(rankweight))
rankweight = (mask).sum()
hsize.append(comm.allreduce(rankweight))
h1bin = []
for cat in [halocat['HImass'], cencat['HImass'], cencat['HIsat']]:
rankweight = (cat.compute()*mask).sum()
h1bin.append(comm.allreduce(rankweight))
h1total.append(h1bin)
#
if rank==0:
tosave = np.zeros((len(hsize), 5))
tosave[:, 1] = hsize
tosave[:, 0] = htotal / (tosave[:, 1])
tosave[:, 2:] = h1total/ (tosave[:, 1].reshape(-1, 1))
tosave[np.isnan(tosave)] = 0
header = 'Halo Mass, Number Halos, HI halos, HI centrals, HI satellites'
np.savetxt(outfolder + "HI_dist_{:6.4f}.txt".format(aa), tosave, fmt='%0.6e', header=header)
if __name__=="__main__":
if rank==0: print('Starting')
suff='-m1_00p3mh-alpha-0p8-subvol'
outfolder = ofolder + suff[1:]
if bs == 1024: outfolder = outfolder + "-big"
outfolder += "/%s/"%modelname
if rank == 0: print(outfolder)
#outfolder = ofolder + suff[1:] + "/%s/"%modelname
try:
os.makedirs(outfolder)
except : pass
for aa in alist:
if rank == 0: print('\n ############## Redshift = %0.2f ############## \n'%(1/aa-1))
halocat = BigFileCatalog(scratchyf + sim+ '/fastpm_%0.4f//'%aa, dataset='LL-0.200')
mp = halocat.attrs['MassTable'][1]*1e10##
halocat['Mass'] = halocat['Length'].compute() * mp
cencat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/cencat'%aa+suff)
satcat = BigFileCatalog(scratchcm + sim+'/fastpm_%0.4f/satcat'%aa+suff)
#
HImodelz = HImodel(aa)
halocat['HImass'], cencat['HImass'], satcat['HImass'] = HImodelz.assignHI(halocat, cencat, satcat)
cencat['HIsat'] = HImodelz.getinsat(satcat['HImass'].compute(), satcat['GlobalID'].compute(),
cencat.csize, cencat['Mass'].size, cencat.comm).local
mbins = 10**np.arange(9, 15.1, 0.2)
distribution(aa, halocat, cencat, satcat, outfolder, mbins=mbins)
| 34.238462 | 106 | 0.625927 |
f716fe49497e0fa092b298e9bf377c75b13a12bf | 1,247 | py | Python | servers/python/coweb/auth/public.py | opencoweb/coweb | 7b3a87ee9eda735a859447d404ee16edde1c5671 | [
"AFL-2.1"
] | 83 | 2015-01-05T19:02:57.000Z | 2021-11-19T02:48:09.000Z | servers/python/coweb/auth/public.py | xuelingxiao/coweb | 7b3a87ee9eda735a859447d404ee16edde1c5671 | [
"AFL-2.1"
] | 3 | 2015-12-16T13:49:33.000Z | 2019-06-17T13:38:50.000Z | servers/python/coweb/auth/public.py | xuelingxiao/coweb | 7b3a87ee9eda735a859447d404ee16edde1c5671 | [
"AFL-2.1"
] | 14 | 2015-04-29T22:36:53.000Z | 2021-11-18T03:24:29.000Z |
'''
Copyright (c) The Dojo Foundation 2011. All Rights Reserved.
Copyright (c) IBM Corporation 2008, 2011. All Rights Reserved.
'''
from .base import AuthBase
class PublicAuth(AuthBase):
cookieName = 'coweb.auth.public.username'
_userId = 0
def requires_login(self):
'''Does not require login. Usernames automatically generated.'''
return False
def requires_cookies(self):
'''Uses tornado's secure cookies.'''
return True
def get_current_user(self, handler):
'''
Generates a unique userXXX for this server instance and stores it in a
secure cookie.
'''
username = handler.get_secure_cookie(self.cookieName)
if not username:
# generate a random username and set it with a very short lifetime
username = 'user%03d' % self._userId
# yes, this might conflict between server restarts but it's dummy
# public auth
self._userId += 1
handler.set_secure_cookie(self.cookieName, username, expires_days=1)
return username
def clear_credentials(self, handler):
'''Clears the authentication cookie.'''
handler.clear_cookie(self.cookieName)
| 34.638889 | 80 | 0.642342 |
f716fffcedc3cbaba6d963cd4a7e2061ef83cc34 | 4,109 | py | Python | mdeepctr/models/xdeepfm.py | TS-SE-GROUP/icme2019 | 7eefdb7de6a7ff3bec1721fafb822d80d80dbba3 | [
"MIT"
] | 78 | 2019-02-21T12:44:11.000Z | 2022-03-30T11:42:33.000Z | mdeepctr/models/xdeepfm.py | rightnowandholdon/icme2019 | fe9b31db7bf19b08d5e5d41a259f0a297eb21766 | [
"MIT"
] | 6 | 2019-04-11T13:14:46.000Z | 2021-05-19T14:36:07.000Z | mdeepctr/models/xdeepfm.py | rightnowandholdon/icme2019 | fe9b31db7bf19b08d5e5d41a259f0a297eb21766 | [
"MIT"
] | 22 | 2019-02-21T02:51:54.000Z | 2021-12-10T02:04:28.000Z |
# -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Lian J, Zhou X, Zhang F, et al. xDeepFM: Combining Explicit and Implicit Feature Interactions for Recommender Systems[J]. arXiv preprint arXiv:1803.05170, 2018.(https://arxiv.org/pdf/1803.05170.pdf)
"""
import tensorflow as tf
from ..input_embedding import preprocess_input_embedding
from ..layers.core import PredictionLayer, MLP
from ..layers.interaction import CIN
from ..utils import check_feature_config_dict
from ..layers.utils import concat_fun
def xDeepFM(feature_dim_dict, embedding_size=8, hidden_size=(256, 256), cin_layer_size=(128, 128,), cin_split_half=True, cin_activation='relu', l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False, output_dim=1,):
"""Instantiates the xDeepFM architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param cin_layer_size: list,list of positive integer or empty list, the feature maps in each hidden layer of Compressed Interaction Network
:param cin_split_half: bool.if set to True, half of the feature maps in each hidden will connect to output unit
:param cin_activation: activation function used on feature maps
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: L2 regularizer strength applied to embedding vector
:param l2_reg_deep: L2 regularizer strength applied to deep net
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param activation: Activation function to use in deep net
:param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
    :param use_bn: bool. Whether use BatchNormalization before activation or not in deep net
    :param output_dim: positive integer, the number of prediction outputs to build
    :return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, l2_reg_linear, init_std,
seed, True)
fm_input = concat_fun(deep_emb_list, axis=1)
if len(cin_layer_size) > 0:
exFM_out = CIN(cin_layer_size, cin_activation,
cin_split_half, seed)(fm_input)
exFM_logit = tf.keras.layers.Dense(1, activation=None,)(exFM_out)
deep_input = tf.keras.layers.Flatten()(fm_input)
output=[]
for _ in range(output_dim):
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
use_bn, seed)(deep_input)
deep_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
if len(hidden_size) == 0 and len(cin_layer_size) == 0: # only linear
final_logit = linear_logit
elif len(hidden_size) == 0 and len(cin_layer_size) > 0: # linear + CIN
final_logit = tf.keras.layers.add([linear_logit, exFM_logit])
elif len(hidden_size) > 0 and len(cin_layer_size) == 0: # linear + Deep
final_logit = tf.keras.layers.add([linear_logit, deep_logit])
elif len(hidden_size) > 0 and len(cin_layer_size) > 0: # linear + CIN + Deep
final_logit = tf.keras.layers.add(
[linear_logit, deep_logit, exFM_logit])
else:
raise NotImplementedError
output.append(PredictionLayer(final_activation)(final_logit))
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
return model
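# A minimal usage sketch (toy feature sizes, assumed to follow the feature_dim_dict
# format documented above; hypothetical helper, not called anywhere in this module):
def _sketch_build_toy_model():
    toy_features = {"sparse": {"user_id": 100, "item_id": 200, "category": 10},
                    "dense": ["price"]}
    toy_model = xDeepFM(toy_features, embedding_size=4, hidden_size=(32, 32),
                        cin_layer_size=(16, 16), output_dim=2)
    toy_model.compile("adam", loss="binary_crossentropy")
    return toy_model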
| 54.065789 | 325 | 0.690436 |
f717181beb4c4046a045a44b6d0f4db857d85911 | 4,211 | py | Python | forte/processors/stanfordnlp_processor.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | 2 | 2021-01-01T12:07:27.000Z | 2021-09-10T03:57:18.000Z | forte/processors/stanfordnlp_processor.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | forte/processors/stanfordnlp_processor.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List, Any, Dict
import stanza
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.data_pack import DataPack
from forte.processors.base import PackProcessor
from ft.onto.base_ontology import Token, Sentence, Dependency
__all__ = [
"StandfordNLPProcessor",
]
class StandfordNLPProcessor(PackProcessor):
def __init__(self):
super().__init__()
self.nlp = None
self.processors = set()
def set_up(self):
stanza.download(self.configs.lang, self.configs.dir)
self.processors = set(self.configs.processors.split(','))
# pylint: disable=unused-argument
def initialize(self, resources: Resources, configs: Config):
super().initialize(resources, configs)
self.set_up()
self.nlp = stanza.Pipeline(
lang=self.configs.lang,
dir=self.configs.dir,
use_gpu=self.configs.use_gpu,
processors=self.configs.processors,
)
@classmethod
def default_configs(cls) -> Dict[str, Any]:
"""
This defines a basic config structure for StanfordNLP.
:return:
"""
config = super().default_configs()
config.update(
{
'processors': 'tokenize,pos,lemma,depparse',
'lang': 'en',
# Language code for the language to build the Pipeline
'use_gpu': False,
'dir': '.',
})
return config
def _process(self, input_pack: DataPack):
doc = input_pack.text
if len(doc) == 0:
logging.warning("Find empty text in doc.")
# sentence parsing
sentences = self.nlp(doc).sentences
# Iterating through stanfordnlp sentence objects
for sentence in sentences:
Sentence(input_pack, sentence.tokens[0].start_char,
sentence.tokens[-1].end_char)
tokens: List[Token] = []
if "tokenize" in self.processors:
# Iterating through stanfordnlp word objects
for word in sentence.words:
misc = word.misc.split('|')
t_start = -1
t_end = -1
for m in misc:
k, v = m.split('=')
if k == 'start_char':
t_start = int(v)
elif k == 'end_char':
t_end = int(v)
if t_start < 0 or t_end < 0:
raise ValueError(
"Cannot determine word start or end for "
"stanfordnlp.")
token = Token(input_pack, t_start, t_end)
if "pos" in self.processors:
token.pos = word.pos
token.ud_xpos = word.xpos
if "lemma" in self.processors:
token.lemma = word.lemma
tokens.append(token)
# For each sentence, get the dependency relations among tokens
if "depparse" in self.processors:
# Iterating through token entries in current sentence
for token, word in zip(tokens, sentence.words):
child = token # current token
parent = tokens[word.head - 1] # Head token
relation_entry = Dependency(input_pack, parent, child)
relation_entry.rel_type = word.deprel
| 35.091667 | 74 | 0.566611 |
f71722c909f61d25a111b3cedd33fb84627eb888 | 29,871 | py | Python | msp430backend/msp430_ws/server_protocol.py | zlalanne/msp430-webcontrol | a4e8f84942c3e16fa447907d2cfff4587013d6b3 | [
"BSD-3-Clause"
] | null | null | null | msp430backend/msp430_ws/server_protocol.py | zlalanne/msp430-webcontrol | a4e8f84942c3e16fa447907d2cfff4587013d6b3 | [
"BSD-3-Clause"
] | null | null | null | msp430backend/msp430_ws/server_protocol.py | zlalanne/msp430-webcontrol | a4e8f84942c3e16fa447907d2cfff4587013d6b3 | [
"BSD-3-Clause"
] | null | null | null | import requests
import json
import sys
import re
from twisted.internet.error import ConnectionDone
from twisted.internet import protocol, threads, reactor
from twisted.protocols.basic import LineReceiver
from twisted.python import log
from autobahn.websocket import WebSocketServerFactory, WebSocketServerProtocol, HttpException
# assumed import: httpstatus provides the HTTP_STATUS_CODE_* tuples referenced in
# onConnect() below (the module path may differ across autobahn releases)
from autobahn import httpstatus
import settings
import common_protocol
import buffer
import msp430_data.interface
import msp430_data.utility
class WebServerProtocol(LineReceiver):
def __init__(self):
self.client = None
def lineReceived(self, line):
line = line.strip()
if self.client is None:
if self.debug:
log.msg("WebServerProtocol.lineReceived - No Client type")
# TODO: This is an untested way to kill the connection. Need
# to test.
self.transport.loseConnection()
else:
self.client.dataReceived(line)
def connectionLost(self, reason=ConnectionDone):
if self.client is None:
if self.debug:
log.msg("WebServerProtocol.connectionClose - No Client type")
return
self.client.connectionLost(reason)
def connectionMade(self):
if self.transport.getPeer().host == settings.SITE_SERVER:
self.client = WebServerClient(self)
else:
self.client = MSP430Client(self)
self.client.connectionMade()
def register_msp430(self):
"""This sends a http request to the django appliaction. This effictevely
enters the MSP430 into the database of the django application. It then
tells the websocket server to alert the user that a new MSP430 has
come online"""
# Need to send MAC and IP of MSP430
payload = {}
payload['mac'] = self.client.mac
payload['ip'] = self.client.protocol.transport.getPeer().host
payload['iface'] = self.client.iface
data = {'json': payload}
try:
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
response = requests.post("http://%s/tcp_comm/register/" % settings.SITE_SERVER_ADDRESS, data=json.dumps(data),
headers=headers)
except:
pass
# TODO: Need to validate response
# Notify Browsers
reactor.callFromThread(self.factory.ws_factory.register_msp430_wsite, self)
def disconnect_msp430(self):
payload = {}
payload['mac'] = self.client.mac
data = {'json':payload}
try:
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
response = requests.post("http://%s/tcp_comm/disconnect/" % settings.SITE_SERVER_ADDRESS, data=json.dumps(data),
headers=headers)
except:
pass
# Notify Browsers
reactor.callFromThread(self.factory.ws_factory.disconnect_msp430_wsite, self)
class WebServerClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def connectionMade(self):
pass
def connectionLost(self, reason=ConnectionDone):
pass
def dataReceived(self, data):
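        # Shape of the payload this handler expects from the Django site, shown here
        # with hypothetical values (the real field contents come from the web app):
        #   {"json": [{"mac": "00:11:22:33:44:55",
        #              "read":  [{"cls_name": "ADC", "ch_port": 3, "equation": "x * 2"}],
        #              "write": [{"cls_name": "LED", "ch_port": 1, "equation": ""}]}]}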
data = data.strip()
try:
msp430s = json.loads(data)["json"]
log.msg(msp430s)
for msp430 in msp430s:
if self.protocol.factory.ws_factory.debug:
log.msg('WebServerClient - Recieved config for MSP430 %s' % msp430['mac'])
except:
if self.protocol.factory.ws_factory.debug:
log.msg('WebServerClient - Error parsing msp430 configs %s' % sys.exc_info())
return 'error'
# Delegate the reqest to the WS factory
self.protocol.factory.ws_factory.config_msp430(msp430)
class ServerState(common_protocol.State):
def __init__(self, client):
self.client = client
def activated(self):
if self.client.protocol.debug:
log.msg("%s.activated()" % self.__class__.__name__)
def deactivated(self):
if self.client.protocol.debug:
log.msg("%s.deactivated()" % self.__class__.__name__)
class WebSocketClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def onMessage(self, msg):
try:
state = self.state_stack.pop_wr()
except IndexError:
if self.protocol.factory.debug:
log.msg("%s.onMessage - Received a message in an unknown state, ignored", self.__class__.__name__)
state.onMessage(msg)
def onClose(self, wasClean, code, reason):
pass
def onOpen(self):
pass
class UserClient(WebSocketClient):
"""User client related protocol"""
def __init__(self, protocol):
WebSocketClient.__init__(self, protocol)
self.associated_msp430 = None
self.streaming_buffer_read = None
self.streaming_buffer_write = None
self.ackcount = 0
self.paused = True
def register_to_msp430(self, msp430_mac):
# Notify factory we want to unregister if registered first
self.ackcount = 0
if self.associated_msp430 is not None:
self.protocol.factory.unregister_user_to_msp430(self, self.associated_msp430)
msp430 = self.protocol.factory.get_msp430(msp430_mac)
if msp430:
self.streaming_buffer_read = buffer.UpdateDict()
self.streaming_buffer_write = buffer.UpdateDict()
self.associated_msp430 = msp430
self.protocol.factory.register_user_to_msp430(self, self.associated_msp430)
# begin streaming
self.resume_streaming()
def resume_streaming(self):
self.paused = False
self.copy_and_send()
def pause_streaming(self):
self.paused = True
def copy_and_send(self):
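        # Flow-control note: every WRITE_DATA pushed to the browser costs one ack
        # credit (ackcount -= 1) and every ACK_DATA from the browser pays credits
        # back, so streaming self-throttles once roughly 10 messages are
        # unacknowledged and resumes when acks or new-data events arrive.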
if self.ackcount <= -10 or self.paused:
return
# copy buffers
self.protocol.factory.copy_msp430_buffers(self.associated_msp430,
self.streaming_buffer_read,
self.streaming_buffer_write)
if len(self.streaming_buffer_read) > 0 or len(self.streaming_buffer_write) > 0:
msg = {'cmd':common_protocol.ServerCommands.WRITE_DATA}
msg['read'] = self.streaming_buffer_read
msg['write'] = self.streaming_buffer_write
self.ackcount -= 1
self.protocol.sendMessage(json.dumps(msg))
# keep polling until we run out of data
reactor.callLater(0, self.copy_and_send)
else:
# when there's new data resume will be called
self.pause_streaming()
def unregister_to_msp430(self):
self.pause_streaming()
if self.associated_msp430 is not None:
self.associated_msp430 = None
def notifyMSP430State(self, msp430, state):
if state == 'config':
if self.associated_msp430 is not msp430:
return
msg = {'cmd':common_protocol.ServerCommands.MSP430_STATE_CHANGE, 'msp430_mac':msp430.client.mac, 'msp430_state':state}
self.protocol.sendMessage(json.dumps(msg))
def onMessage(self, msg):
try:
msg = json.loads(msg)
except:
if self.protocol.debug:
log.msg('UserState.onMessage - JSON error, dropping')
self.protocol.failConnection()
if msg['cmd'] == common_protocol.UserClientCommands.CONNECT_MSP430:
mac = msg['msp430_mac']
self.register_to_msp430(mac)
elif msg['cmd'] == common_protocol.UserClientCommands.ACK_DATA:
ackcount = msg['ack_count']
self.ackcount += ackcount
if self.ackcount > -10:
self.copy_and_send()
elif msg['cmd'] == common_protocol.UserClientCommands.WRITE_DATA:
port = msg['iface_port']
value = msg['value']
if self.associated_msp430 is not None:
self.associated_msp430.write_interface_data(port, value)
def onClose(self, wasClean, code, reason):
self.protocol.factory.disconnect_user(self)
def onOpen(self):
self.protocol.factory.register_user(self)
class TCPClient(common_protocol.ProtocolState):
def __init__(self, protocol):
common_protocol.ProtocolState.__init__(self)
self.protocol = protocol
def connectionMade(self):
pass
def connectionLost(self, reason=ConnectionDone):
pass
def dataReceived(self, data):
try:
state = self.state_stack.pop_wr()
except IndexError:
if self.protocol.factory.debug:
log.msg("%s.onMessage - Received a message in an unknown state, ignored", self.__class__.__name__)
state.dataReceived(data)
class MSP430Client(TCPClient):
def __init__(self, protocol):
TCPClient.__init__(self, protocol)
def connectionMade(self):
self.push_state(MSP430RegisterState(self))
def connectionLost(self, reason=ConnectionDone):
# If we're registered remove ourselves from active client list
if hasattr(self, 'mac'):
self.protocol.factory.ws_factory.disconnect_msp430(self)
pass
def copy_buffers(self, read_buffer, write_buffer):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
for key, value in state.read_data_buffer_eq.iteritems():
read_buffer[key] = value
for key, value in state.write_data_buffer_eq.iteritems():
write_buffer[key] = value
return True
return False
def pause_streaming(self):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
state.pause_streaming()
return True
return False
def resume_streaming(self):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
state.resume_streaming()
return True
return False
def write_interface_data(self, key, data):
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430StreamState):
state.write_interface_data(key, data)
return True
return False
def config_io(self, reads, writes):
"""
        read/writes are lists of dicts with the following keys:
'ch_port': integer or boolean (check cls req)
'equation': empty, or python style math
'cls_name': class name as string, ex) 'ADC'
Returns True/False for success
"""
# check the state of the MSP430 client
try:
state = self.current_state()
except IndexError:
# MSP430 has no states
return False
if isinstance(state, MSP430ConfigState):
# ready to be configured
# MSP430 was waiting for config
pass
elif isinstance(state, MSP430StreamState):
# MSP430 is being re-configured
state.drop_to_config(reads, writes)
# config has to be delegated
return True
else:
# MSP430 can't be put into a config state, fail
return False
state = self.current_state()
# delegate the job to the config state
return state.config_io(reads, writes)
"""MSP430 client related protocol and states"""
class MSP430RegisterState(ServerState):
def __init__(self, client):
super(MSP430RegisterState, self).__init__(client)
self.registered = False
self.re_message_count = 0
def dataReceived(self, data):
if self.re_message_count == 0 and not self.registered:
# MSP430 is requesting to register
# TODO: Add some MSP430 authentication here
if data == common_protocol.ServerCommands.REGISTER:
self.re_message_count += 1
if self.client.protocol.debug:
log.msg("MSP430Client.dataReceived - Registration Request")
self.client.protocol.sendLine(common_protocol.ServerCommands.ACK)
else:
self.client.protocol.sendLine(common_protocol.ServerCommands.NACK)
elif self.re_message_count == 1 and not self.registered:
self.client.protocol.sendLine(common_protocol.ServerCommands.ACK)
def interface_desc(ifaces):
# List of classes that resemble I/O. Creating a struct based on
# their names, docstring, choices and I/O type to send to django
# application.
ret = []
for cls in ifaces:
name = cls.__name__
desc = msp430_data.utility.trim(cls.__doc__)
choices = []
for choice_pin, choice_desc in cls.IO_CHOICES:
choice = {}
choice['s'] = choice_pin
choice['d'] = choice_desc
choices.append(choice)
ret.append({'name':name, 'desc':desc, 'choices':choices, 'io_type':cls.IO_TYPE})
return ret
self.client.iface = {}
self.client.interfaces = msp430_data.interface.get_interface_desc()
for key in self.client.interfaces.iterkeys():
self.client.iface[key] = interface_desc(self.client.interfaces[key])
self.client.mac = data
self.registered = True
self.re_message_count = 0
if self.client.protocol.debug:
log.msg("MSP430Client.dataReceived - Successful Registration")
self.client.push_state(MSP430ConfigState(self.client))
# Add to dictionary of clients in the WS factory
self.client.protocol.factory.ws_factory.register_msp430(self.client)
else:
# TODO: Something went wrong
return
class MSP430ConfigState(ServerState):
"""In this state, the MSP430 is waiting to be configured.
Server is not required to configure the MSP430 immediately.
"""
def __init__(self, client):
super(MSP430ConfigState, self).__init__(client)
def dataReceived(self, data):
if data == common_protocol.MSP430ClientCommands.CONFIG_OK:
log.msg('MSP430ConfigState - MSP430 was configured')
self.client.push_state(MSP430StreamState(self.client,
reads=self.config_reads,
writes=self.config_writes,
interfaces=self.config_interfaces,
mapping=self.config_mapping
))
elif data == common_protocol.MSP430ClientCommands.CONFIG_FAIL:
if self.client.protocol.debug:
log.msg('MSP430ConfigState - MSP430 failed to configure')
# TODO: Notify web server
def config_io(self, reads, writes):
"""
        read/writes are lists of dicts with the following keys:
'ch_port': integer or boolean (check cls req)
'equation': empty, or python style math
'cls_name': class name as string, ex) 'ADC'
Returns True/False for success
"""
self.display_reads = reads
self.display_writes = writes
# Format IO to store on the server
def format_io(io_collection):
# Duplicate equations allowed, duplicate instances not allowed
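            # e.g. (hypothetical) two entries for cls_name 'ADC' on port 3 with
            # different equations collapse into the single key 'cls:ADC, port:3'
            # whose 'equations' list carries both expressions.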
instanced_io_dict = {}
for io in io_collection:
cls_str = io['cls_name']
ch_port = io['ch_port']
equation = io['equation']
key = 'cls:%s, port:%s' % (cls_str, ch_port)
if key not in instanced_io_dict:
io_new_dict = {'cls_name':cls_str, 'ch_port':ch_port}
io_new_dict['equations'] = [equation]
instanced_io_dict[key] = io_new_dict
else:
# we can have more then one equation per instance
existing_instance = instanced_io_dict[key]
equations = existing_instance['equations']
if equation not in equations:
equations.append(equation)
return instanced_io_dict
# Format IO to give to the msp430
def format_io_msp430(io_collection):
config_mapping = {}
msp430_configs = {}
i = 0
for key, io in io_collection:
cls = getattr(msp430_data.interface, io['cls_name'])
msp430_configs[str(i)] = {'pin': io['ch_port'], 'opcode': cls.IO_OPCODE}
config_mapping[str(i)] = io
i += 1
return msp430_configs, config_mapping
self.config_reads = format_io(reads)
self.config_writes = format_io(writes)
self.config_interfaces, self.config_mapping = format_io_msp430(self.config_reads.items() + self.config_writes.items())
if self.client.protocol.debug:
log.msg('MSP430ConfigState - Pushing configs to remote MSP430')
msg = {'cmd':common_protocol.ServerCommands.CONFIG,
'payload':self.config_interfaces}
self.client.protocol.sendLine(json.dumps(msg))
class MSP430StreamState(ServerState):
""" In this state the MSP430 has been configured and is streaming data"""
def __init__(self, client, reads, writes, interfaces, mapping):
super(MSP430StreamState, self).__init__(client)
# Read/Write configs is used to communicate with the web
# Interface config is used to communicate to the msp430
self.config_reads = reads
self.config_writes = writes
self.config_interfaces = interfaces
self.config_mapping = mapping
# Buffers for storing the evaluated data
self.write_data_buffer_eq = {}
self.read_data_buffer_eq = {}
self.datamsgcount_ack = 0
def evaluate_eq(self, eq, value):
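        # eq is a Python expression in terms of x; e.g. (hypothetical)
        # eq='x * 0.5 + 1' with value=10 yields 6.0, while an empty eq passes the
        # raw value through unchanged.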
if eq != '':
# TODO: fix security
x = value
new_value = eval(eq)
else:
new_value = value
return new_value
def deactivated(self):
super(MSP430StreamState, self).deactivated()
self.client.protocol.factory.ws_factory.notify_clients_msp430_state_change(self.client.protocol, state='drop_stream')
def activated(self):
super(MSP430StreamState, self).activated()
self.client.protocol.factory.ws_factory.notify_clients_msp430_state_change(self.client.protocol, state='stream')
def dataReceived(self, data):
try:
data = json.loads(data)
except ValueError:
log.msg("MSP430StreamState.dataReceived - Problem with JSON structure")
log.msg(data)
return
if data['cmd'] == common_protocol.MSP430ClientCommands.DROP_TO_CONFIG_OK:
# Order here is important, pop first!
self.client.pop_state()
self.client.current_state().config_io(self.delegate_config_reads, self.delegate_config_writes)
if data['cmd'] == common_protocol.MSP430ClientCommands.DATA:
self.datamsgcount_ack += 1
interfaces = data['interfaces']
for key, value in interfaces.iteritems():
interface = self.config_mapping[key]
cls_name = interface["cls_name"]
pin = interface["ch_port"]
cls = getattr(msp430_data.interface, cls_name)
# Convert from raw data string to correct data type
new_value = cls.parse_input(value)
# Evaluate equation
if msp430_data.interface.IWrite in cls.__bases__:
for eq in interface["equations"]:
new_key = "cls:{}, port:{}, eq:{}".format(cls_name, int(pin), eq)
self.write_data_buffer_eq[new_key] = {"calculated" : self.evaluate_eq(eq, new_value),
"real": new_value}
elif msp430_data.interface.IRead in cls.__bases__:
for eq in interface["equations"]:
new_key = "cls:{}, port:{}, eq:{}".format(cls_name, int(pin), eq)
self.read_data_buffer_eq[new_key] = self.evaluate_eq(eq, new_value)
# Notify factory to update listening clients
if self.datamsgcount_ack >= 5:
data = {'cmd':common_protocol.ServerCommands.ACK_DATA, 'count':self.datamsgcount_ack}
self.client.protocol.sendLine(json.dumps(data, sort_keys=True))
self.datamsgcount_ack = 0
# Notify factory of new data event
self.client.protocol.factory.ws_factory.msp430_new_data_event(self.client)
def resume_streaming(self):
# Starting to stream again, reset the ack count
self.datamsgcount_ack = 0
msg = {'cmd':common_protocol.ServerCommands.RESUME_STREAMING}
self.client.protocol.sendLine(json.dumps(msg))
def pause_streaming(self):
msg = {'cmd':common_protocol.ServerCommands.PAUSE_STREAMING}
self.client.protocol.sendLine(json.dumps(msg))
def write_interface_data(self, key, value):
# Get the class and port from the key
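        # e.g. a key of the form 'cls:ADC, port:3' (hypothetical values) yields
        # cls_name='ADC' and pin='3'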
match = re.match(r'cls:([A-Za-z0-9_]+),\ port:(\d+)', key)
try:
cls_name = match.group(1)
pin = match.group(2)
except:
# TODO: add correct exception
return
cls = getattr(msp430_data.interface, cls_name)
payload = {'opcode': cls.IO_OPCODE, 'pin' : pin, 'value' : str(value)}
msg = {'cmd':common_protocol.ServerCommands.WRITE_DATA,
'payload': payload}
self.client.protocol.sendLine(json.dumps(msg))
def drop_to_config(self, reads, writes):
# Drop remote MSP430 to config state
msg = {'cmd':common_protocol.ServerCommands.DROP_TO_CONFIG}
self.client.protocol.sendLine(json.dumps(msg))
self.delegate_config_reads = reads
self.delegate_config_writes = writes
class MSP430ServerProtocol(WebSocketServerProtocol):
"""Base server protocol, instantiates child protocols"""
def __init__(self):
self.client = None
def onConnect(self, connectionRequest):
"""Connection to WebSocket Protocol"""
def user(headers):
if self.debug:
log.msg("MSP430ServerProtocol.onConnect - User connected")
return UserClient(self)
# MSP430 won't connect via WebSocket so only option is the client
paths = {
'/':user,
}
if connectionRequest.path not in paths:
raise HttpException(httpstatus.HTTP_STATUS_CODE_NOT_FOUND[0],
httpstatus.HTTP_STATUS_CODE_NOT_FOUND[1])
self.client = paths[connectionRequest.path](connectionRequest.headers)
def onMessage(self, msg, binary):
"""Message received from client"""
if self.client is None:
if self.debug:
log.msg("MSP430ServerProtocol.onMessage - No Client type")
            self.failConnection()
            return
self.client.onMessage(msg)
def onOpen(self):
WebSocketServerProtocol.onOpen(self)
if self.client is not None:
self.client.onOpen()
def onClose(self, wasClean, code, reason):
"""Connect closed, cleanup"""
# base logs
WebSocketServerProtocol.onClose(self, wasClean, code, reason)
if self.client is None:
if self.debug:
log.msg("MSP430ServerProtocol.onClose - No Client type")
return
self.client.onClose(wasClean, code, reason)
class MSP430SocketServerFactory(WebSocketServerFactory):
"""Manages every MSP430 connected to the server."""
def __init__(self, *args, **kwargs):
WebSocketServerFactory.__init__(self, *args, **kwargs)
# safari
self.allowHixie76 = True
# Identify MSP430's by their macs
# Identify user by peerstr
self.msp430_clients = {}
self.user_client = {}
# Key MSP430 mac, value list of user clients
self.msp430_clients_registered_users = {}
def register_user_to_msp430(self, client, msp430):
if len(self.msp430_clients_registered_users[msp430.mac]) == 0:
# MSP430 wasn't streaming, start streaming!
msp430.resume_streaming()
if client not in self.msp430_clients_registered_users[msp430.mac]:
self.msp430_clients_registered_users[msp430.mac].append(client)
if self.debug:
log.msg('MSP430SocketServerFactory.register_user_to_msp430 msp430:%s user:%s' %
(msp430.mac, client.protocol.peerstr))
def unregister_user_to_msp430(self, client, msp430):
client.unregister_to_msp430()
if msp430 is None:
return
if msp430.mac in self.msp430_clients_registered_users:
if client in self.msp430_clients_registered_users[msp430.mac]:
self.msp430_clients_registered_users[msp430.mac].remove(client)
if self.debug:
log.msg('msp430SocketServerFactory.unregister_user_to_msp430 msp430:%s user:%s' %
(msp430.mac, client.protocol.peerstr))
if msp430.mac not in self.msp430_clients_registered_users or len(self.msp430_clients_registered_users[msp430.mac]) == 0:
# Pause streaming
msp430.pause_streaming()
def msp430_new_data_event(self, msp430):
# resume streaming on any MSP430s waiting for new data
for client in self.msp430_clients_registered_users[msp430.mac]:
client.resume_streaming()
def copy_msp430_buffers(self, msp430, read_buffer, write_buffer):
msp430.copy_buffers(read_buffer, write_buffer)
def get_msp430(self, msp430_mac):
if msp430_mac in self.msp430_clients:
return self.msp430_clients[msp430_mac]
return None
def notify_clients_msp430_state_change(self, msp430, state='offline'):
for peerstr, user in self.user_client.iteritems():
user.notifyMSP430State(msp430, state)
def register_user(self, user):
if user.protocol.peerstr not in self.user_client:
self.user_client[user.protocol.peerstr] = user
if self.debug:
log.msg('MSP430SocketServerFactory.register_user %s' % user.protocol.peerstr)
def disconnect_user(self, user):
if self.debug:
log.msg('MSP430SocketServerFactory.disconnect_user %s' % user.protocol.peerstr)
del self.user_client[user.protocol.peerstr]
self.unregister_user_to_msp430(user, user.associated_msp430)
def register_msp430(self, msp430):
# This is called when the MSP430 has been authenticated with the WS server
# register on the site server
msp430.protocol.register_msp430()
# register locally to the factory
self.msp430_clients[msp430.mac] = msp430
self.msp430_clients_registered_users[msp430.mac] = []
if self.debug:
log.msg("MSP430SocketServerFactory.register_msp430 - %s registered, %d msp430" % (msp430.mac, len(self.msp430_clients)))
def register_msp430_wsite(self, msp430):
"""Called after MSP430 has been registed to the website"""
self.notify_clients_msp430_state_change(msp430, state='online')
def disconnect_msp430(self, msp430):
if hasattr(msp430, 'mac'):
if self.debug:
log.msg("MSP430SocketServerFactory.disconnect_msp430 - %s msp430 disconnected" % (msp430.mac,))
reactor.callInThread(msp430.protocol.disconnect_msp430)
try:
del self.msp430_clients[msp430.mac]
del self.msp430_clients_registered_users[msp430.mac]
except KeyError:
log.msg(self.msp430_clients)
def disconnect_msp430_wsite(self, msp430):
"""Called after MSP430 has been disconnected from web server"""
self.notify_clients_msp430_state_change(msp430, state='offline')
def config_msp430(self, configs):
"""
Not thread safe
configs:
dict with the following keys:
            'read': list of port configs
            'write': list of port configs
'mac': '00:00:...'
port config dict with the following keys:
'ch_port': integer or boolean (check cls req)
'equation': empty, or python style math
'cls_name': class name as string, ex) 'ADC'
Return: True/False for success
"""
# Check if MSP430 is actually an active client
mac = configs['mac']
if mac not in self.msp430_clients:
return False
msp430_client = self.msp430_clients[mac]
return msp430_client.config_io(reads=configs['read'], writes=configs['write'])
| 35.773653 | 132 | 0.614174 |
f717282762e91799ab097381b849786ec18cef78 | 213 | py | Python | tensorbay/opendataset/SegTrack2/__init__.py | Hoteryoung/tensorbay-python-sdk | 53c34dd529c20ec69b34ddd348b5c8e74f4094d0 | [
"MIT"
] | null | null | null | tensorbay/opendataset/SegTrack2/__init__.py | Hoteryoung/tensorbay-python-sdk | 53c34dd529c20ec69b34ddd348b5c8e74f4094d0 | [
"MIT"
] | null | null | null | tensorbay/opendataset/SegTrack2/__init__.py | Hoteryoung/tensorbay-python-sdk | 53c34dd529c20ec69b34ddd348b5c8e74f4094d0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright 2021 Graviti. Licensed under MIT License.
#
# pylint: disable=invalid-name
"""Dataloader of the SegTrack2 dataset."""
from .loader import SegTrack2
__all__ = ["SegTrack2"]
| 17.75 | 54 | 0.732394 |
f7172919cb32b1bce840769389a548df78d2e786 | 3,420 | py | Python | tempest/api/compute/admin/test_fixed_ips_negative.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/admin/test_fixed_ips_negative.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | tempest/api/compute/admin/test_fixed_ips_negative.py | midokura/tempest | b0ec1d280f057d5d9c2eda081bcbda7e381ecb3b | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class FixedIPsNegativeTestJson(base.BaseV2ComputeAdminTest):
@classmethod
def resource_setup(cls):
super(FixedIPsNegativeTestJson, cls).resource_setup()
if CONF.service_available.neutron:
msg = ("%s skipped as neutron is available" % cls.__name__)
raise cls.skipException(msg)
cls.client = cls.os_adm.fixed_ips_client
cls.non_admin_client = cls.fixed_ips_client
server = cls.create_test_server(wait_until='ACTIVE')
server = cls.servers_client.get_server(server['id'])
for ip_set in server['addresses']:
for ip in server['addresses'][ip_set]:
if ip['OS-EXT-IPS:type'] == 'fixed':
cls.ip = ip['addr']
break
if cls.ip:
break
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_list_fixed_ip_details_with_non_admin_user(self):
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.get_fixed_ip_details, self.ip)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_non_admin_user(self):
body = {"reserve": "None"}
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_unreserve_with_non_admin_user(self):
body = {"unreserve": "None"}
self.assertRaises(lib_exc.Unauthorized,
self.non_admin_client.reserve_fixed_ip,
self.ip, body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_set_reserve_with_invalid_ip(self):
# NOTE(maurosr): since this exercises the same code snippet, we do it
# only for reserve action
body = {"reserve": "None"}
# NOTE(eliqiao): in Juno, the exception is NotFound, but in master, we
# change the error code to BadRequest, both exceptions should be
# accepted by tempest
self.assertRaises((lib_exc.NotFound, lib_exc.BadRequest),
self.client.reserve_fixed_ip,
"my.invalid.ip", body)
@test.attr(type=['negative', 'gate'])
@test.services('network')
def test_fixed_ip_with_invalid_action(self):
body = {"invalid_action": "None"}
self.assertRaises(lib_exc.BadRequest,
self.client.reserve_fixed_ip,
self.ip, body)
| 39.767442 | 78 | 0.635965 |
f7173966ea1c7e65d2e1a2dd36186f075d2562fc | 5,614 | py | Python | examples/Cliner/CliNER/code/feature_extraction/umls_dir/create_sqliteDB.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | examples/Cliner/CliNER/code/feature_extraction/umls_dir/create_sqliteDB.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | examples/Cliner/CliNER/code/feature_extraction/umls_dir/create_sqliteDB.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | # database.py creates a .db file for performing umls searches.
import atexit
import os
import sqlite3
import sys
from read_config import enabled_modules
features_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if features_dir not in sys.path:
sys.path.append(features_dir)
# find where umls tables are located
enabled = enabled_modules()
umls_tables = enabled['UMLS']
# set to True when create_db() is successful
success = False
db_path = None
conn = None
MRSTY_TABLE_FILE = None
MRCON_TABLE_FILE = None
MRREL_TABLE_FILE = None
LRABR_TABLE_FILE = None
# this ensures files are closed properly and umls.db is removed if not successful
@atexit.register
def umls_db_cleanup():
# pylint: disable=global-statement
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
if conn is not None:
conn.close()
if MRSTY_TABLE_FILE is not None:
MRSTY_TABLE_FILE.close()
if MRCON_TABLE_FILE is not None:
MRCON_TABLE_FILE.close()
if MRREL_TABLE_FILE is not None:
MRREL_TABLE_FILE.close()
if LRABR_TABLE_FILE is not None:
LRABR_TABLE_FILE.close()
if success is False:
# remove umls.db, it is junk now
if db_path is not None:
os.remove(db_path)
def create_db():
# pylint: disable=global-statement
global success
global conn
global db_path
global MRSTY_TABLE_FILE
global MRCON_TABLE_FILE
global MRREL_TABLE_FILE
global LRABR_TABLE_FILE
print("\ncreating umls.db")
# connect to the .db file we are creating.
db_path = os.path.join(umls_tables, 'umls.db')
conn = sqlite3.connect(db_path)
conn.text_factory = str
print("opening files")
# load data in files.
try:
mrsty_path = os.path.join(umls_tables, 'MRSTY.RRF')
MRSTY_TABLE_FILE = open(mrsty_path, "r")
except IOError:
print("\nNo file to use for creating MRSTY.RRF table\n")
sys.exit()
try:
mrcon_path = os.path.join(umls_tables, 'MRCONSO.RRF')
MRCON_TABLE_FILE = open(mrcon_path, "r")
except IOError:
print("\nNo file to use for creating MRCONSO.RRF table\n")
sys.exit()
try:
mrrel_path = os.path.join(umls_tables, 'MRREL.RRF')
MRREL_TABLE_FILE = open(mrrel_path, "r")
except IOError:
print("\nNo file to use for creating MRREL.RRF table\n")
sys.exit()
try:
lrabr_path = os.path.join(umls_tables, 'LRABR')
LRABR_TABLE_FILE = open(lrabr_path, "r")
except IOError:
print("\nNo file to use for creating LRABR table\n")
sys.exit()
print("creating tables")
c = conn.cursor()
# create tables.
c.execute("CREATE TABLE MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) ;")
c.execute(
"CREATE TABLE MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, SAUI, \
SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) ;")
c.execute(
"CREATE TABLE MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, \
RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF );")
c.execute("CREATE TABLE LRABR( EUI1, ABR, TYPE, EUI2, STR);")
print("inserting data into MRSTY table")
for line in MRSTY_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 6
c.execute("INSERT INTO MRSTY( CUI, TUI, STN, STY, ATUI, CVF ) \
values( ?, ?, ?, ?, ?, ?)", tuple(line))
print("inserting data into MRCON table")
for line in MRCON_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 18
c.execute(
"INSERT INTO MRCON( CUI, LAT, TS, LUI, STT, SUI, ISPREF, AUI, \
SAUI, SCUI, SDUI, SAB, TTY, CODE, STR, SRL, SUPPRESS, CVF ) \
values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);",
tuple(line))
print("inserting data into MRREL table")
for line in MRREL_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
# end will always be empty str
line.pop()
assert len(line) == 16
c.execute(
"INSERT INTO MRREL( CUI1, AUI1, STYPE1, REL, CUI2, AUI2, STYPE2, \
RELA, RUI, SRUI, SAB, SL, RG, DIR, SUPPRESS, CVF ) \
values( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )",
tuple(line))
print("inserting into LRABR table")
for line in LRABR_TABLE_FILE:
line = line.strip('\n')
line = line.split('|')
line.pop()
assert len(line) == 5
c.execute("INSERT INTO LRABR( EUI1, ABR, TYPE, EUI2, STR) \
values( ?, ?, ?, ?,?)", tuple(line))
print("creating indices")
# create indices for faster queries
c.execute("CREATE INDEX mrsty_cui_map ON MRSTY(CUI)")
c.execute("CREATE INDEX mrcon_str_map ON MRCON(STR)")
c.execute("CREATE INDEX mrcon_cui_map ON MRCON(CUI)")
c.execute("CREATE INDEX mrrel_cui2_map ON MRREL( CUI2 )")
c.execute("CREATE INDEX mrrel_cui1_map on MRREL( CUI1 ) ")
c.execute("CREATE INDEX mrrel_rel_map on MRREL( REL )")
c.execute("CREATE INDEX lrabr_abr_map on LRABR(ABR)")
c.execute("CREATE INDEX lrabr_str_map on LRABR(STR)")
# save changes to .db
conn.commit()
success = True
print("\nsqlite database created")
if __name__ == "__main__":
create_db()
| 27.385366 | 79 | 0.612576 |
f717576ebe1b232b2fdba0695ea262b2ae5063cc | 2,568 | py | Python | src/base/base_train.py | MohamedAli1995/Cifar-100-Classifier | 924704a81ce13062825a88b90b80e8ac2ba45d63 | [
"MIT"
] | 2 | 2019-05-12T16:11:20.000Z | 2020-04-10T22:39:57.000Z | src/base/base_train.py | MohamedAli1995/Cifar-100-Classifier | 924704a81ce13062825a88b90b80e8ac2ba45d63 | [
"MIT"
] | null | null | null | src/base/base_train.py | MohamedAli1995/Cifar-100-Classifier | 924704a81ce13062825a88b90b80e8ac2ba45d63 | [
"MIT"
] | null | null | null | import tensorflow as tf
class BaseTrain:
"""Standard base_train-class for easy multiple-inheritance.
It is responsible for defining the functions to be implemented with any child.
Attributes:
sess: Tensorflow session to use.
model: Model to be trained.
data: Data_loader object to interact with dataset.
config: Config object to store data related to training, testing and validation.
logger: Logger object to use tensorboard.
"""
def __init__(self, sess, model, data, config, logger):
self.model = model
self.config = config
self.sess = sess
self.data = data
self.logger = logger
if not self.config.pretrain: # If not pretrain then initialize variables.
self.init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
self.sess.run(self.init)
def train(self):
"""Train the model for the number of epochs in config.num_epochs.
        Calls validate_epoch every config.val_per_epoch epochs (and on the last epoch) when config.use_val is true.
Returns:
"""
for cur_epoch in range(self.model.cur_epoch_tensor.eval(self.sess), self.config.num_epochs + 1, 1):
self.data.prepare_new_epoch_data()
self.train_epoch()
if self.config.use_val and (
cur_epoch % self.config.val_per_epoch == 0 or cur_epoch == self.config.num_epochs):
self.validate_epoch()
self.sess.run(self.model.increment_cur_epoch_tensor)
def train_epoch(self):
"""Implements the logic of training_epoch:
-Loop over the batches of the training data and call the train step for each.
-Add any summaries you want using the summary
"""
        raise NotImplementedError
def train_step(self):
"""Implements the logic of the train step:
-Run the tensorflow session
-Returns:
            Any of the metrics that need to be summarized.
"""
raise NotImplementedError
def validate_epoch(self):
"""Implements the logic of validation_epoch:
-Loop over the batches of the validation data and call the validate step for each.
-Add any summaries you want using the summary
"""
        raise NotImplementedError
def validate_step(self):
"""Implements the logic of the validate step:
-Run the tensorflow session
-Returns:
            Any of the metrics that need to be summarized.
"""
        raise NotImplementedError
| 36.169014 | 115 | 0.640576 |
f7177676b64b016a2006776e619b093446b0ff41 | 5,353 | py | Python | test/language/choice_types/python/UInt64ParamChoiceTest.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | 2 | 2019-02-06T17:50:24.000Z | 2019-11-20T16:51:34.000Z | test/language/choice_types/python/UInt64ParamChoiceTest.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | 1 | 2019-11-25T16:25:51.000Z | 2019-11-25T18:09:39.000Z | test/language/choice_types/python/UInt64ParamChoiceTest.py | PeachOS/zserio | ea01f6906c125a6baab7e8ed865eeb08cd46c37c | [
"BSD-3-Clause"
] | null | null | null | import unittest
import zserio
from testutils import getZserioApi
class UInt64ParamChoiceTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "choice_types.zs").uint64_param_choice
def testSelectorConstructor(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(self.VARIANT_A_SELECTOR, uint64ParamChoice.getSelector())
def testFromReader(self):
selector = self.VARIANT_B_SELECTOR
value = 234
writer = zserio.BitStreamWriter()
UInt64ParamChoiceTest._writeUInt64ParamChoiceToStream(writer, selector, value)
reader = zserio.BitStreamReader(writer.getByteArray())
uint64ParamChoice = self.api.UInt64ParamChoice.fromReader(reader, selector)
self.assertEqual(selector, uint64ParamChoice.getSelector())
self.assertEqual(value, uint64ParamChoice.getB())
def testEq(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
value = 99
uint64ParamChoice1.setA(value)
self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
uint64ParamChoice2.setA(value)
self.assertTrue(uint64ParamChoice1 == uint64ParamChoice2)
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertFalse(uint64ParamChoice1 == uint64ParamChoice2)
def testHash(self):
uint64ParamChoice1 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
uint64ParamChoice2 = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
value = 99
uint64ParamChoice1.setA(value)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
uint64ParamChoice2.setA(value)
self.assertEqual(hash(uint64ParamChoice1), hash(uint64ParamChoice2))
diffValue = value + 1
uint64ParamChoice2.setA(diffValue)
self.assertTrue(hash(uint64ParamChoice1) != hash(uint64ParamChoice2))
def testGetSelector(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
self.assertEqual(self.VARIANT_C_SELECTOR, uint64ParamChoice.getSelector())
def testGetSetA(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
value = 99
uint64ParamChoice.setA(value)
self.assertEqual(value, uint64ParamChoice.getA())
def testGetSetB(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
value = 234
uint64ParamChoice.setB(value)
self.assertEqual(value, uint64ParamChoice.getB())
def testGetSetC(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_C_SELECTOR)
value = 23456
uint64ParamChoice.setC(value)
self.assertEqual(value, uint64ParamChoice.getC())
def testBitSizeOf(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
self.assertEqual(8, uint64ParamChoice.bitSizeOf())
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(16, uint64ParamChoice.bitSizeOf())
def testInitializeOffsets(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
bitPosition = 1
self.assertEqual(9, uint64ParamChoice.initializeOffsets(bitPosition))
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
self.assertEqual(17, uint64ParamChoice.initializeOffsets(bitPosition))
def testReadWrite(self):
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
byteValue = 99
uint64ParamChoice.setA(byteValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_A_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(byteValue, readUInt64ParamChoice.getA())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
uint64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
shortValue = 234
uint64ParamChoice.setB(shortValue)
writer = zserio.BitStreamWriter()
uint64ParamChoice.write(writer)
readUInt64ParamChoice = self.api.UInt64ParamChoice(self.VARIANT_B_SELECTOR)
reader = zserio.BitStreamReader(writer.getByteArray())
readUInt64ParamChoice.read(reader)
self.assertEqual(shortValue, readUInt64ParamChoice.getB())
self.assertEqual(uint64ParamChoice, readUInt64ParamChoice)
@staticmethod
def _writeUInt64ParamChoiceToStream(writer, selector, value):
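        # Mirrors the choice layout exercised by these tests: selector 1 -> 8-bit case,
        # selectors 2-4 -> 16-bit case, 5-6 -> empty case, anything else -> 32-bit case.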
if selector == 1:
writer.writeSignedBits(value, 8)
elif selector in (2, 3, 4):
writer.writeSignedBits(value, 16)
elif selector in (5, 6):
pass
else:
writer.writeSignedBits(value, 32)
VARIANT_A_SELECTOR = 1
VARIANT_B_SELECTOR = 2
VARIANT_C_SELECTOR = 7
| 40.862595 | 86 | 0.721838 |
f7178a5f0eaaff23bb2efef24ecd4ae204c5ee9e | 638 | py | Python | src/django_backend_api/manage.py | Adityaraj1711/django-backend-architecture | 7f3c270af0cb5dd2ebc097c7436a4958cd48ff7c | [
"MIT"
] | 25 | 2020-04-28T19:25:28.000Z | 2021-07-04T17:24:35.000Z | src/django_backend_api/manage.py | Adityaraj1711/django-backend-architecture | 7f3c270af0cb5dd2ebc097c7436a4958cd48ff7c | [
"MIT"
] | 13 | 2020-08-05T22:40:37.000Z | 2022-03-12T00:24:36.000Z | src/django_backend_api/manage.py | Adityaraj1711/django-backend-architecture | 7f3c270af0cb5dd2ebc097c7436a4958cd48ff7c | [
"MIT"
] | 2 | 2020-10-29T13:10:01.000Z | 2021-11-22T01:55:14.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_backend_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29 | 82 | 0.688088 |
f717ba6db19fac8fe6c6bc3c5388211b577e2db4 | 871 | py | Python | tests/unit/fake_data_root/kubernetes/var/lib/juju/agents/unit-containerd-2/charm/hooks/relations/untrusted-container-runtime/requires.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 6 | 2021-10-01T19:46:14.000Z | 2022-03-31T17:05:08.000Z | tests/unit/fake_data_root/kubernetes/var/lib/juju/agents/unit-containerd-2/charm/hooks/relations/untrusted-container-runtime/requires.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 111 | 2021-10-01T18:18:17.000Z | 2022-03-29T12:23:20.000Z | tests/unit/fake_data_root/kubernetes/var/lib/juju/agents/unit-containerd-2/charm/hooks/relations/untrusted-container-runtime/requires.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 10 | 2021-09-29T14:47:54.000Z | 2022-03-18T14:52:16.000Z | from charms.reactive import (
Endpoint,
set_flag,
clear_flag
)
from charms.reactive import (
when,
when_not
)
class ContainerRuntimeRequires(Endpoint):
@when('endpoint.{endpoint_name}.changed')
def changed(self):
set_flag(self.expand_name('endpoint.{endpoint_name}.available'))
@when_not('endpoint.{endpoint_name}.joined')
def broken(self):
clear_flag(self.expand_name('endpoint.{endpoint_name}.available'))
def set_config(self, name, binary_path):
"""
Set the configuration to be published.
:param name: String name of runtime
:param binary_path: String runtime executable
:return: None
"""
for relation in self.relations:
relation.to_publish.update({
'name': name,
'binary_path': binary_path
})
| 24.885714 | 74 | 0.626866 |
f717ba7e0a6e1a58c6c5756909c027da10c010bb | 878 | py | Python | read_exif.py | kiyoon/camera-tools | 2e269141597dd27ec7d41e49285e01ef566cb54c | [
"MIT"
] | 1 | 2021-11-14T23:30:47.000Z | 2021-11-14T23:30:47.000Z | read_exif.py | kiyoon/camera-tools | 2e269141597dd27ec7d41e49285e01ef566cb54c | [
"MIT"
] | null | null | null | read_exif.py | kiyoon/camera-tools | 2e269141597dd27ec7d41e49285e01ef566cb54c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
class Formatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter):
pass
parser = argparse.ArgumentParser(
description='''Read EXIF data
Author: Kiyoon Kim (yoonkr33@gmail.com)''',
formatter_class=Formatter)
parser.add_argument('input_files', type=str, nargs='+',
help='files to read metadata')
args = parser.parse_args()
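# Example invocation (hypothetical paths): python read_exif.py photos/*.JPG raw/IMG_0001.CR2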
import glob
import os
import exiftool
import pprint
if __name__ == "__main__":
for origpath in args.input_files:
for path in glob.glob(origpath): # glob: Windows wildcard support
root, fname_ext = os.path.split(path)
fname, fext = os.path.splitext(fname_ext)
with exiftool.ExifTool() as et:
metadata = et.get_metadata(path)
print(path)
pprint.pprint(metadata)
| 24.388889 | 94 | 0.671982 |
f717bdb31e80489aaefa7fbd595c263379b27f37 | 15,376 | py | Python | src/hub/dataload/sources/chembl/chembl_upload.py | ravila4/mychem.info | 9b63b5f0957b5e7b252ca8122734a363905036b3 | [
"Apache-2.0"
] | null | null | null | src/hub/dataload/sources/chembl/chembl_upload.py | ravila4/mychem.info | 9b63b5f0957b5e7b252ca8122734a363905036b3 | [
"Apache-2.0"
] | null | null | null | src/hub/dataload/sources/chembl/chembl_upload.py | ravila4/mychem.info | 9b63b5f0957b5e7b252ca8122734a363905036b3 | [
"Apache-2.0"
] | null | null | null | """
Chembl uploader
"""
# pylint: disable=E0401, E0611
import os
import glob
import pymongo
import biothings.hub.dataload.storage as storage
from biothings.hub.dataload.uploader import ParallelizedSourceUploader
from hub.dataload.uploader import BaseDrugUploader
from hub.datatransform.keylookup import MyChemKeyLookup
from .chembl_parser import load_data
SRC_META = {
"url": 'https://www.ebi.ac.uk/chembl/',
"license_url" : "https://www.ebi.ac.uk/about/terms-of-use",
"license_url_short" : "http://bit.ly/2KAUCAm"
}
class ChemblUploader(BaseDrugUploader, ParallelizedSourceUploader):
"""
ChemblUploader - upload the Chembl data source
"""
name = "chembl"
storage_class = storage.RootKeyMergerStorage
__metadata__ = {"src_meta" : SRC_META}
MOLECULE_PATTERN = "molecule.*.json"
keylookup = MyChemKeyLookup(
[("inchikey", "chembl.inchi_key"),
("inchi", "chembl.inchi"),
("chembl", "chembl.molecule_chembl_id"),
("chebi", "chembl.chebi_par_id"),
("drugcentral", "chembl.xrefs.drugcentral.id"),
("drugname", "chembl.pref_name")],
# TODO: handle duplicate keys from pubchem
# - we use RootKeyMergerStorage, but the num. duplicates
# - is too high (>10000)
# ("pubchem", "chembl.xrefs.pubchem.sid"),
copy_from_doc=True)
def jobs(self):
"""
        This generates the arguments for the self.load_data() method, allowing parallelization.
"""
json_files = glob.glob(os.path.join(self.data_folder, self.__class__.MOLECULE_PATTERN))
return [(f,) for f in json_files]
def load_data(self, data_folder):
"""load data from an input file"""
self.logger.info("Load data from '%s'" % data_folder)
return self.keylookup(load_data, debug=True)(data_folder)
def post_update_data(self, *args, **kwargs):
"""create indexes following an update"""
# pylint: disable=W0613
"""
for idxname in ["chembl.chebi_par_id", "chembl.inchi", "chembl.molecule_chembl_id"]:
self.logger.info("Indexing '%s'" % idxname)
# background=true or it'll lock the whole database...
self.collection.create_index(idxname, background=True)
"""
for idxname in ["chembl.chebi_par_id", "chembl.molecule_chembl_id"]:
self.logger.info("Indexing '%s'" % idxname)
# background=true or it'll lock the whole database...
self.collection.create_index(idxname, background=True)
@classmethod
def get_mapping(cls):
"""return mapping data"""
mapping = {
"chembl": {
"properties": {
"biotherapeutic": {
"properties": {
"helm_notation": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"description": {
"type": "text"
},
"biocomponents": {
"properties": {
"organism": {
"type": "text"
},
"tax_id": {
"type": "integer"
},
"sequence": {
"type": "text"
},
"component_id": {
"type": "integer"
},
"description": {
"type": "text"
},
"component_type": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"molecule_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
'copy_to': ['all'],
}
}
},
"therapeutic_flag": {
"type": "boolean"
},
"usan_stem": {
"type": "text"
},
"molecule_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"molecule_properties": {
"properties": {
"heavy_atoms": {
"type": "integer"
},
"acd_most_bpka": {
"type": "float"
},
"mw_freebase": {
"type": "float"
},
"num_ro5_violations": {
"type": "integer"
},
"molecular_species": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"qed_weighted": {
"type": "float"
},
"ro3_pass": {
"type": "boolean"
},
"full_mwt": {
"type": "float"
},
"num_lipinski_ro5_violations": {
"type": "integer"
},
"rtb": {
"type": "integer"
},
"psa": {
"type": "float"
},
"alogp": {
"type": "float"
},
"hbd": {
"type": "integer"
},
"acd_most_apka": {
"type": "float"
},
"hbd_lipinski": {
"type": "integer"
},
"acd_logp": {
"type": "float"
},
"full_molformula": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"aromatic_rings": {
"type": "integer"
},
"hba_lipinski": {
"type": "integer"
},
"mw_monoisotopic": {
"type": "float"
},
"hba": {
"type": "integer"
},
"acd_logd": {
"type": "float"
}
}
},
"helm_notation": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"max_phase": {
"type": "integer"
},
"inorganic_flag": {
"type": "integer"
},
"usan_stem_definition": {
"type": "text"
},
"dosed_ingredient": {
"type": "boolean"
},
"chebi_par_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"withdrawn_reason": {
"type": "text"
},
"molecule_hierarchy": {
"properties": {
"parent_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"molecule_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"prodrug": {
"type": "integer"
},
"withdrawn_flag": {
"type": "boolean"
},
"usan_year": {
"type": "integer"
},
"parenteral": {
"type": "boolean"
},
"black_box_warning": {
"type": "integer"
},
"polymer_flag": {
"type": "boolean"
},
"molecule_synonyms": {
"properties": {
"molecule_synonym": {
"type": "text"
},
"synonyms": {
"type": "text"
},
"syn_type": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"atc_classifications": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"molecule_type": {
"type": "text"
},
"first_in_class": {
"type": "integer"
},
"inchi": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"structure_type": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"withdrawn_class": {
"type": "text"
},
"inchi_key": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"topical": {
"type": "boolean"
},
"oral": {
"type": "boolean"
},
"xrefs": {
"properties": {
"drugcentral": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
}
}
},
"tg-gates": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
}
}
},
"wikipedia": {
"properties": {
"url_stub": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"dailymed": {
"properties": {
"name": {
"type": "text"
}
}
},
"pubchem": {
"properties": {
"sid": {
"type": "integer"
}
}
}
}
},
"chirality": {
"type": "integer"
},
"usan_substem": {
"type": "text"
},
"indication_class": {
"type": "text"
},
"withdrawn_country": {
"type": "text"
},
"withdrawn_year": {
"type": "integer"
},
"availability_type": {
"type": "integer"
},
"smiles": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"natural_product": {
"type": "integer"
},
"pref_name": {
"type": "text",
"copy_to": ["all"]
},
"first_approval": {
"type": "integer"
}
}
}
}
return mapping
| 40.569921 | 95 | 0.280437 |
f717c2aceaf306d0f7eae56f9df7f70d7fb7e56b | 2,488 | py | Python | usort/util.py | thatch/usort | 2ca1ff63d6cfc79e76ea95b69d162f4579c3fa3c | [
"MIT"
] | null | null | null | usort/util.py | thatch/usort | 2ca1ff63d6cfc79e76ea95b69d162f4579c3fa3c | [
"MIT"
] | null | null | null | usort/util.py | thatch/usort | 2ca1ff63d6cfc79e76ea95b69d162f4579c3fa3c | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
from contextvars import ContextVar
from pathlib import Path
from time import monotonic
from typing import Callable, Generator, List, Optional, Tuple
import libcst as cst
TIMINGS: ContextVar[List[Tuple[str, float]]] = ContextVar("TIMINGS")
@contextmanager
def timed(msg: str) -> Generator[None, None, None]:
"""
Records the monotonic duration of the contained context, with a given description.
Timings are stored for later use/printing with `print_timings()`.
"""
before = monotonic()
yield
after = monotonic()
try:
TIMINGS.get().append((msg, after - before))
except LookupError:
pass
@contextmanager
def save_timings(to: List[Tuple[str, float]]) -> Generator[None, None, None]:
token = TIMINGS.set([])
yield
to.extend(TIMINGS.get())
TIMINGS.reset(token)
def merge_timings(more: List[Tuple[str, float]]) -> None:
TIMINGS.get().extend(more)
def print_timings(fn: Callable[[str], None] = print) -> None:
"""
Print all stored timing values in microseconds.
"""
for msg, duration in TIMINGS.get():
fn(f"{msg + ':':50} {int(duration*1000000):7} µs")
def try_parse(path: Path, data: Optional[bytes] = None) -> cst.Module:
"""
Attempts to parse the file with all syntax versions known by LibCST.
If parsing fails on all supported grammar versions, then raises the parser error
from the first/newest version attempted.
"""
if data is None:
data = path.read_bytes()
with timed(f"parsing {path}"):
parse_error: Optional[cst.ParserSyntaxError] = None
for version in cst.KNOWN_PYTHON_VERSION_STRINGS[::-1]:
try:
mod = cst.parse_module(
data, cst.PartialParserConfig(python_version=version)
)
return mod
except cst.ParserSyntaxError as e:
# keep the first error we see in case parsing fails on all versions
if parse_error is None:
parse_error = e
# not caring about existing traceback here because it's not useful for parse
# errors, and usort_path is already going to wrap it in a custom class
raise parse_error or Exception("unknown parse failure")
| 30.716049 | 86 | 0.658762 |
f717c597ce99c986760a12c223c014567cf34f38 | 96,886 | py | Python | ThirdParty/ZopeInterface/zope/interface/tests/test_registry.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | 3 | 2016-02-01T02:29:51.000Z | 2020-09-04T17:19:24.000Z | ThirdParty/ZopeInterface/zope/interface/tests/test_registry.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | 7 | 2021-02-08T20:22:15.000Z | 2022-03-11T23:19:41.000Z | ThirdParty/ZopeInterface/zope/interface/tests/test_registry.py | OpenGeoscience/VTK | a373e975b9284a022f43a062ebf5042bb17b4e44 | [
"BSD-3-Clause"
] | 6 | 2017-02-13T09:11:02.000Z | 2021-06-29T11:22:18.000Z | ##############################################################################
#
# Copyright (c) 2001, 2002, 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Component Registry Tests"""
import unittest
class _SilencePy3Deprecations(unittest.TestCase):
# silence deprecation warnings under py3
def failUnless(self, expr):
# St00pid speling.
return self.assertTrue(expr)
def failIf(self, expr):
# St00pid speling.
return self.assertFalse(expr)
class ComponentsTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.registry import Components
return Components
def _makeOne(self, name='test', *args, **kw):
return self._getTargetClass()(name, *args, **kw)
def _wrapEvents(self):
from zope.interface import registry
_events = []
def _notify(*args, **kw):
_events.append((args, kw))
_monkey = _Monkey(registry, notify=_notify)
return _monkey, _events
def test_ctor_no_bases(self):
from zope.interface.adapter import AdapterRegistry
comp = self._makeOne('testing')
self.assertEqual(comp.__name__, 'testing')
self.assertEqual(comp.__bases__, ())
self.failUnless(isinstance(comp.adapters, AdapterRegistry))
self.failUnless(isinstance(comp.utilities, AdapterRegistry))
self.assertEqual(comp.adapters.__bases__, ())
self.assertEqual(comp.utilities.__bases__, ())
self.assertEqual(comp._utility_registrations, {})
self.assertEqual(comp._adapter_registrations, {})
self.assertEqual(comp._subscription_registrations, [])
self.assertEqual(comp._handler_registrations, [])
def test_ctor_w_base(self):
base = self._makeOne('base')
comp = self._makeOne('testing', (base,))
self.assertEqual(comp.__name__, 'testing')
self.assertEqual(comp.__bases__, (base,))
self.assertEqual(comp.adapters.__bases__, (base.adapters,))
self.assertEqual(comp.utilities.__bases__, (base.utilities,))
def test___repr__(self):
comp = self._makeOne('testing')
self.assertEqual(repr(comp), '<Components testing>')
# test _init_registries / _init_registrations via only caller, __init__.
def test_assign_to___bases__(self):
base1 = self._makeOne('base1')
base2 = self._makeOne('base2')
comp = self._makeOne()
comp.__bases__ = (base1, base2)
self.assertEqual(comp.__bases__, (base1, base2))
self.assertEqual(comp.adapters.__bases__,
(base1.adapters, base2.adapters))
self.assertEqual(comp.utilities.__bases__,
(base1.utilities, base2.utilities))
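    # Illustrative sketch (not part of the original suite): a registry built
    # with another registry in __bases__ falls back to that base for lookups.
    # IFoo, base, and child here are names local to this sketch.
    def _example_lookup_falls_back_to_base(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        base = self._makeOne('base')
        child = self._makeOne('child', (base,))
        _util = object()
        base.registerUtility(_util, IFoo)
        # the child has no registration of its own, so the base answers
        assert child.queryUtility(IFoo) is _util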
def test_registerUtility_both_factory_and_component(self):
def _factory():
pass
_to_reg = object()
comp = self._makeOne()
self.assertRaises(TypeError, comp.registerUtility,
component=_to_reg, factory=_factory)
def test_registerUtility_w_component(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = object()
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerUtility(_to_reg, ifoo, _name, _info)
self.failUnless(comp.utilities._adapters[0][ifoo][_name] is _to_reg)
self.assertEqual(comp._utility_registrations[ifoo, _name],
(_to_reg, _info, None))
self.assertEqual(comp.utilities._subscribers[0][ifoo][''], (_to_reg,))
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _to_reg)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is None)
def test_registerUtility_w_factory(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = object()
def _factory():
return _to_reg
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerUtility(None, ifoo, _name, _info, factory=_factory)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _to_reg)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _factory)
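    # Illustrative sketch (not part of the original suite): the factory form of
    # registerUtility calls the factory once at registration time and stores its
    # return value, so queryUtility hands back that same object afterwards.
    def _example_register_utility_with_factory(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        _util = object()
        comp = self._makeOne()
        comp.registerUtility(None, IFoo, factory=lambda: _util)
        assert comp.queryUtility(IFoo) is _util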
def test_registerUtility_no_provided_available(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
class Foo(object):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = Foo()
comp = self._makeOne()
self.assertRaises(TypeError,
comp.registerUtility, _to_reg, None, _name, _info)
def test_registerUtility_wo_provided(self):
from zope.interface.declarations import directlyProvides
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
class Foo(object):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = Foo()
directlyProvides(_to_reg, ifoo)
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerUtility(_to_reg, None, _name, _info)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _to_reg)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is None)
def test_registerUtility_duplicates_existing_reg(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, _name, _info)
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerUtility(_to_reg, ifoo, _name, _info)
self.assertEqual(len(_events), 0)
def test_registerUtility_replaces_existing_reg(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.interfaces import Registered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_before, _after = object(), object()
comp = self._makeOne()
comp.registerUtility(_before, ifoo, _name, _info)
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerUtility(_after, ifoo, _name, _info)
self.assertEqual(len(_events), 2)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _before)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is None)
args, kw = _events[1]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _after)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is None)
def test_registerUtility_w_existing_subscr(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name1 = _u('name1')
_name2 = _u('name2')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, _name1, _info)
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerUtility(_to_reg, ifoo, _name2, _info)
self.assertEqual(comp.utilities._subscribers[0][ifoo][''], (_to_reg,))
def test_registerUtility_wo_event(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = object()
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerUtility(_to_reg, ifoo, _name, _info, False)
self.assertEqual(len(_events), 0)
def test_unregisterUtility_neither_factory_nor_component_nor_provided(self):
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterUtility,
component=None, provided=None, factory=None)
def test_unregisterUtility_both_factory_and_component(self):
def _factory():
pass
_to_reg = object()
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterUtility,
component=_to_reg, factory=_factory)
def test_unregisterUtility_w_component_miss(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_name = _u('name')
_to_reg = object()
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterUtility(_to_reg, ifoo, _name)
self.failIf(unreg)
self.failIf(_events)
def test_unregisterUtility_w_component(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_name = _u('name')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, _name)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterUtility(_to_reg, ifoo, _name)
self.failUnless(unreg)
self.failIf(comp.utilities._adapters) # all erased
self.failIf((ifoo, _name) in comp._utility_registrations)
self.failIf(comp.utilities._subscribers)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _to_reg)
self.failUnless(event.object.factory is None)
def test_unregisterUtility_w_factory(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = object()
def _factory():
return _to_reg
comp = self._makeOne()
comp.registerUtility(None, ifoo, _name, _info, factory=_factory)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterUtility(None, ifoo, _name, factory=_factory)
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _to_reg)
self.failUnless(event.object.factory is _factory)
def test_unregisterUtility_wo_explicit_provided(self):
from zope.interface.declarations import directlyProvides
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
class Foo(object):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = Foo()
directlyProvides(_to_reg, ifoo)
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, _name, _info)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterUtility(_to_reg, None, _name)
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _to_reg)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is None)
def test_unregisterUtility_wo_component_or_factory(self):
from zope.interface.declarations import directlyProvides
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import UtilityRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
class Foo(object):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
_to_reg = Foo()
directlyProvides(_to_reg, ifoo)
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, _name, _info)
_monkey, _events = self._wrapEvents()
with _monkey:
# Just pass the interface / name
unreg = comp.unregisterUtility(provided=ifoo, name=_name)
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, UtilityRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.failUnless(event.object.name is _name)
self.failUnless(event.object.component is _to_reg)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is None)
def test_unregisterUtility_w_existing_subscr(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name1 = _u('name1')
_name2 = _u('name2')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, _name1, _info)
comp.registerUtility(_to_reg, ifoo, _name2, _info)
_monkey, _events = self._wrapEvents()
with _monkey:
comp.unregisterUtility(_to_reg, ifoo, _name2)
self.assertEqual(comp.utilities._subscribers[0][ifoo][''], (_to_reg,))
def test_registeredUtilities_empty(self):
comp = self._makeOne()
self.assertEqual(list(comp.registeredUtilities()), [])
def test_registeredUtilities_notempty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
from zope.interface.registry import UtilityRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name1 = _u('name1')
_name2 = _u('name2')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, _name1, _info)
comp.registerUtility(_to_reg, ifoo, _name2, _info)
reg = sorted(comp.registeredUtilities(), key=lambda r: r.name)
self.assertEqual(len(reg), 2)
self.failUnless(isinstance(reg[0], UtilityRegistration))
self.failUnless(reg[0].registry is comp)
self.failUnless(reg[0].provided is ifoo)
self.failUnless(reg[0].name is _name1)
self.failUnless(reg[0].component is _to_reg)
self.failUnless(reg[0].info is _info)
self.failUnless(reg[0].factory is None)
self.failUnless(isinstance(reg[1], UtilityRegistration))
self.failUnless(reg[1].registry is comp)
self.failUnless(reg[1].provided is ifoo)
self.failUnless(reg[1].name is _name2)
self.failUnless(reg[1].component is _to_reg)
self.failUnless(reg[1].info is _info)
self.failUnless(reg[1].factory is None)
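    # Illustrative sketch (not part of the original suite): registeredUtilities
    # yields UtilityRegistration records whose attributes (name, provided,
    # component, ...) can be inspected, e.g. to mirror registrations elsewhere.
    def _example_iterate_registered_utilities(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        comp = self._makeOne()
        comp.registerUtility(object(), IFoo, name='a')
        comp.registerUtility(object(), IFoo, name='b')
        names = sorted(reg.name for reg in comp.registeredUtilities())
        assert names == ['a', 'b']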
def test_queryUtility_miss_no_default(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
self.failUnless(comp.queryUtility(ifoo) is None)
def test_queryUtility_miss_w_default(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
_default = object()
self.failUnless(comp.queryUtility(ifoo, default=_default) is _default)
def test_queryUtility_hit(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo)
self.failUnless(comp.queryUtility(ifoo) is _to_reg)
def test_getUtility_miss(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import ComponentLookupError
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
self.assertRaises(ComponentLookupError, comp.getUtility, ifoo)
def test_getUtility_hit(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo)
self.failUnless(comp.getUtility(ifoo) is _to_reg)
def test_getUtilitiesFor_miss(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
self.assertEqual(list(comp.getUtilitiesFor(ifoo)), [])
def test_getUtilitiesFor_hit(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_name1 = _u('name1')
_name2 = _u('name2')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, name=_name1)
comp.registerUtility(_to_reg, ifoo, name=_name2)
self.assertEqual(sorted(comp.getUtilitiesFor(ifoo)),
[(_name1, _to_reg), (_name2, _to_reg)])
def test_getAllUtilitiesRegisteredFor_miss(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
self.assertEqual(list(comp.getAllUtilitiesRegisteredFor(ifoo)), [])
def test_getAllUtilitiesRegisteredFor_hit(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_name1 = _u('name1')
_name2 = _u('name2')
_to_reg = object()
comp = self._makeOne()
comp.registerUtility(_to_reg, ifoo, name=_name1)
comp.registerUtility(_to_reg, ifoo, name=_name2)
self.assertEqual(list(comp.getAllUtilitiesRegisteredFor(ifoo)),
[_to_reg])
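    # Illustrative sketch (not part of the original suite): utilities can be
    # registered under distinct names for the same interface; getUtilitiesFor
    # yields (name, utility) pairs and getUtility takes the name as an argument.
    def _example_named_utilities(self):
        from zope.interface.interface import InterfaceClass
        IFoo = InterfaceClass('IFoo')
        comp = self._makeOne()
        first, second = object(), object()
        comp.registerUtility(first, IFoo, name='first')
        comp.registerUtility(second, IFoo, name='second')
        assert dict(comp.getUtilitiesFor(IFoo)) == {'first': first,
                                                    'second': second}
        assert comp.getUtility(IFoo, 'second') is second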
def test_registerAdapter_w_explicit_provided_and_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import AdapterRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerAdapter(_factory, (ibar,), ifoo, _name, _info)
self.failUnless(comp.adapters._adapters[1][ibar][ifoo][_name]
is _factory)
self.assertEqual(comp._adapter_registrations[(ibar,), ifoo, _name],
(_factory, _info))
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.failUnless(event.object.name is _name)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _factory)
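    # Illustrative sketch (not part of the original suite): an adapter factory
    # registered for (IBar,) -> IFoo is called with the adapted object whenever
    # an IBar provider is passed to queryAdapter. All names here are local.
    def _example_register_and_query_adapter(self):
        from zope.interface.interface import InterfaceClass
        from zope.interface.declarations import implementer
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        class _Adapter(object):
            def __init__(self, context):
                self.context = context
        @implementer(IBar)
        class _Bar(object):
            pass
        comp = self._makeOne()
        comp.registerAdapter(_Adapter, (IBar,), IFoo)
        bar = _Bar()
        adapter = comp.queryAdapter(bar, IFoo)
        assert isinstance(adapter, _Adapter) and adapter.context is bar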
def test_registerAdapter_no_provided_available(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
_to_reg = object()
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
self.assertRaises(TypeError, comp.registerAdapter, _Factory, (ibar,),
name=_name, info=_info)
def test_registerAdapter_wo_explicit_provided(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface.interfaces import Registered
from zope.interface.registry import AdapterRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
_to_reg = object()
@implementer(ifoo)
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerAdapter(_Factory, (ibar,), name=_name, info=_info)
self.failUnless(comp.adapters._adapters[1][ibar][ifoo][_name]
is _Factory)
self.assertEqual(comp._adapter_registrations[(ibar,), ifoo, _name],
(_Factory, _info))
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.failUnless(event.object.name is _name)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _Factory)
def test_registerAdapter_no_required_available(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
self.assertRaises(TypeError, comp.registerAdapter, _Factory,
provided=ifoo, name=_name, info=_info)
def test_registerAdapter_w_invalid_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
self.assertRaises(TypeError, comp.registerAdapter, _Factory,
ibar, provided=ifoo, name=_name, info=_info)
def test_registerAdapter_w_required_containing_None(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interface import Interface
from zope.interface.interfaces import Registered
from zope.interface.registry import AdapterRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_name = _u('name')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerAdapter(_Factory, [None], provided=ifoo,
name=_name, info=_info)
self.failUnless(comp.adapters._adapters[1][Interface][ifoo][_name]
is _Factory)
self.assertEqual(comp._adapter_registrations[(Interface,), ifoo, _name],
(_Factory, _info))
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (Interface,))
self.failUnless(event.object.name is _name)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _Factory)
def test_registerAdapter_w_required_containing_class(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface.declarations import implementedBy
from zope.interface.interfaces import Registered
from zope.interface.registry import AdapterRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
class _Factory(object):
def __init__(self, context):
self._context = context
@implementer(ibar)
class _Context(object):
pass
_ctx_impl = implementedBy(_Context)
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerAdapter(_Factory, [_Context], provided=ifoo,
name=_name, info=_info)
self.failUnless(comp.adapters._adapters[1][_ctx_impl][ifoo][_name]
is _Factory)
self.assertEqual(comp._adapter_registrations[(_ctx_impl,), ifoo, _name],
(_Factory, _info))
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (_ctx_impl,))
self.failUnless(event.object.name is _name)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _Factory)
def test_registerAdapter_w_required_containing_junk(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
self.assertRaises(TypeError, comp.registerAdapter, _Factory, [object()],
provided=ifoo, name=_name, info=_info)
def test_registerAdapter_wo_explicit_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import AdapterRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
class _Factory(object):
__component_adapts__ = (ibar,)
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerAdapter(_Factory, provided=ifoo, name=_name,
info=_info)
self.failUnless(comp.adapters._adapters[1][ibar][ifoo][_name]
is _Factory)
self.assertEqual(comp._adapter_registrations[(ibar,), ifoo, _name],
(_Factory, _info))
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.failUnless(event.object.name is _name)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _Factory)
def test_registerAdapter_wo_event(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_name = _u('name')
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerAdapter(_factory, (ibar,), ifoo, _name, _info,
event=False)
self.assertEqual(len(_events), 0)
def test_unregisterAdapter_neither_factory_nor_provided(self):
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterAdapter,
factory=None, provided=None)
def test_unregisterAdapter_neither_factory_nor_required(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterAdapter,
factory=None, provided=ifoo, required=None)
def test_unregisterAdapter_miss(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterAdapter(_Factory, (ibar,), ifoo)
self.failIf(unreg)
def test_unregisterAdapter_hit_w_explicit_provided_and_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import AdapterRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar,), ifoo)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterAdapter(_Factory, (ibar,), ifoo)
self.failUnless(unreg)
self.failIf(comp.adapters._adapters)
self.failIf(comp._adapter_registrations)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is _Factory)
def test_unregisterAdapter_wo_explicit_provided(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface.interfaces import Unregistered
from zope.interface.registry import AdapterRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
@implementer(ifoo)
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar,), ifoo)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterAdapter(_Factory, (ibar,))
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is _Factory)
def test_unregisterAdapter_wo_explicit_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import AdapterRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
__component_adapts__ = (ibar,)
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar,), ifoo)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterAdapter(_Factory, provided=ifoo)
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, AdapterRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is _Factory)
def test_registeredAdapters_empty(self):
comp = self._makeOne()
self.assertEqual(list(comp.registeredAdapters()), [])
def test_registeredAdapters_notempty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
from zope.interface.registry import AdapterRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
        ibar = IFoo('IBar')
_info = _u('info')
_name1 = _u('name1')
_name2 = _u('name2')
class _Factory(object):
def __init__(self, context):
pass
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar,), ifoo, _name1, _info)
comp.registerAdapter(_Factory, (ibar,), ifoo, _name2, _info)
reg = sorted(comp.registeredAdapters(), key=lambda r: r.name)
self.assertEqual(len(reg), 2)
self.failUnless(isinstance(reg[0], AdapterRegistration))
self.failUnless(reg[0].registry is comp)
self.failUnless(reg[0].provided is ifoo)
self.assertEqual(reg[0].required, (ibar,))
self.failUnless(reg[0].name is _name1)
self.failUnless(reg[0].info is _info)
self.failUnless(reg[0].factory is _Factory)
self.failUnless(isinstance(reg[1], AdapterRegistration))
self.failUnless(reg[1].registry is comp)
self.failUnless(reg[1].provided is ifoo)
self.assertEqual(reg[1].required, (ibar,))
self.failUnless(reg[1].name is _name2)
self.failUnless(reg[1].info is _info)
self.failUnless(reg[1].factory is _Factory)
def test_queryAdapter_miss_no_default(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
_context = object()
self.failUnless(comp.queryAdapter(_context, ifoo) is None)
def test_queryAdapter_miss_w_default(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
_context = object()
_default = object()
self.failUnless(
comp.queryAdapter(_context, ifoo, default=_default) is _default)
def test_queryAdapter_hit(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
def __init__(self, context):
self.context = context
@implementer(ibar)
class _Context(object):
pass
_context = _Context()
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar,), ifoo)
adapter = comp.queryAdapter(_context, ifoo)
self.failUnless(isinstance(adapter, _Factory))
self.failUnless(adapter.context is _context)
def test_getAdapter_miss(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface.interfaces import ComponentLookupError
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
@implementer(ibar)
class _Context(object):
pass
_context = _Context()
comp = self._makeOne()
self.assertRaises(ComponentLookupError,
comp.getAdapter, _context, ifoo)
def test_getAdapter_hit(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
def __init__(self, context):
self.context = context
@implementer(ibar)
class _Context(object):
pass
_context = _Context()
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar,), ifoo)
adapter = comp.getAdapter(_context, ifoo)
self.failUnless(isinstance(adapter, _Factory))
self.failUnless(adapter.context is _context)
def test_queryMultiAdapter_miss(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
ibaz = IFoo('IBaz')
@implementer(ibar)
class _Context1(object):
pass
@implementer(ibaz)
class _Context2(object):
pass
_context1 = _Context1()
_context2 = _Context2()
comp = self._makeOne()
self.assertEqual(comp.queryMultiAdapter((_context1, _context2), ifoo),
None)
def test_queryMultiAdapter_miss_w_default(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
ibaz = IFoo('IBaz')
@implementer(ibar)
class _Context1(object):
pass
@implementer(ibaz)
class _Context2(object):
pass
_context1 = _Context1()
_context2 = _Context2()
_default = object()
comp = self._makeOne()
self.failUnless(
comp.queryMultiAdapter((_context1, _context2), ifoo,
default=_default) is _default)
def test_queryMultiAdapter_hit(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
ibaz = IFoo('IBaz')
@implementer(ibar)
class _Context1(object):
pass
@implementer(ibaz)
class _Context2(object):
pass
_context1 = _Context1()
_context2 = _Context2()
class _Factory(object):
def __init__(self, context1, context2):
self.context = context1, context2
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar, ibaz), ifoo)
adapter = comp.queryMultiAdapter((_context1, _context2), ifoo)
self.failUnless(isinstance(adapter, _Factory))
self.assertEqual(adapter.context, (_context1, _context2))
def test_getMultiAdapter_miss(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface.interfaces import ComponentLookupError
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
ibaz = IFoo('IBaz')
@implementer(ibar)
class _Context1(object):
pass
@implementer(ibaz)
class _Context2(object):
pass
_context1 = _Context1()
_context2 = _Context2()
comp = self._makeOne()
self.assertRaises(ComponentLookupError,
comp.getMultiAdapter, (_context1, _context2), ifoo)
def test_getMultiAdapter_hit(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
ibaz = IFoo('IBaz')
@implementer(ibar)
class _Context1(object):
pass
@implementer(ibaz)
class _Context2(object):
pass
_context1 = _Context1()
_context2 = _Context2()
class _Factory(object):
def __init__(self, context1, context2):
self.context = context1, context2
comp = self._makeOne()
comp.registerAdapter(_Factory, (ibar, ibaz), ifoo)
adapter = comp.getMultiAdapter((_context1, _context2), ifoo)
self.failUnless(isinstance(adapter, _Factory))
self.assertEqual(adapter.context, (_context1, _context2))
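    # Illustrative sketch (not part of the original suite): a multi-adapter is
    # registered against a tuple of required interfaces and looked up by passing
    # a matching tuple of objects to queryMultiAdapter / getMultiAdapter.
    def _example_multi_adapter(self):
        from zope.interface.interface import InterfaceClass
        from zope.interface.declarations import implementer
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        IBaz = InterfaceClass('IBaz')
        class _Pair(object):
            def __init__(self, first, second):
                self.pair = (first, second)
        @implementer(IBar)
        class _Bar(object):
            pass
        @implementer(IBaz)
        class _Baz(object):
            pass
        comp = self._makeOne()
        comp.registerAdapter(_Pair, (IBar, IBaz), IFoo)
        adapter = comp.getMultiAdapter((_Bar(), _Baz()), IFoo)
        assert isinstance(adapter, _Pair)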
def test_getAdapters_empty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
ibaz = IFoo('IBaz')
@implementer(ibar)
class _Context1(object):
pass
@implementer(ibaz)
class _Context2(object):
pass
_context1 = _Context1()
_context2 = _Context2()
comp = self._makeOne()
self.assertEqual(
list(comp.getAdapters((_context1, _context2), ifoo)), [])
def test_getAdapters_non_empty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
ibaz = IFoo('IBaz')
@implementer(ibar)
class _Context1(object):
pass
@implementer(ibaz)
class _Context2(object):
pass
_context1 = _Context1()
_context2 = _Context2()
class _Factory1(object):
def __init__(self, context1, context2):
self.context = context1, context2
class _Factory2(object):
def __init__(self, context1, context2):
self.context = context1, context2
_name1 = _u('name1')
_name2 = _u('name2')
comp = self._makeOne()
comp.registerAdapter(_Factory1, (ibar, ibaz), ifoo, name=_name1)
comp.registerAdapter(_Factory2, (ibar, ibaz), ifoo, name=_name2)
found = sorted(comp.getAdapters((_context1, _context2), ifoo))
self.assertEqual(len(found), 2)
self.assertEqual(found[0][0], _name1)
self.failUnless(isinstance(found[0][1], _Factory1))
self.assertEqual(found[1][0], _name2)
self.failUnless(isinstance(found[1][1], _Factory2))
def test_registerSubscriptionAdapter_w_nonblank_name(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_name = _u('name')
_info = _u('info')
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
self.assertRaises(TypeError, comp.registerSubscriptionAdapter,
_factory, (ibar,), ifoo, _name, _info)
def test_registerSubscriptionAdapter_w_explicit_provided_and_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import SubscriptionRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_blank = _u('')
_info = _u('info')
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerSubscriptionAdapter(_factory, (ibar,), ifoo,
info=_info)
reg = comp.adapters._subscribers[1][ibar][ifoo][_blank]
self.assertEqual(len(reg), 1)
self.failUnless(reg[0] is _factory)
self.assertEqual(comp._subscription_registrations,
[((ibar,), ifoo, _blank, _factory, _info)])
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, SubscriptionRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, _blank)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _factory)
def test_registerSubscriptionAdapter_wo_explicit_provided(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface.interfaces import Registered
from zope.interface.registry import SubscriptionRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_blank = _u('')
_to_reg = object()
@implementer(ifoo)
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerSubscriptionAdapter(_Factory, (ibar,), info=_info)
reg = comp.adapters._subscribers[1][ibar][ifoo][_blank]
self.assertEqual(len(reg), 1)
self.failUnless(reg[0] is _Factory)
self.assertEqual(comp._subscription_registrations,
[((ibar,), ifoo, _blank, _Factory, _info)])
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, SubscriptionRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, _blank)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _Factory)
def test_registerSubscriptionAdapter_wo_explicit_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import SubscriptionRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_info = _u('info')
_blank = _u('')
class _Factory(object):
__component_adapts__ = (ibar,)
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerSubscriptionAdapter(
_Factory, provided=ifoo, info=_info)
reg = comp.adapters._subscribers[1][ibar][ifoo][_blank]
self.assertEqual(len(reg), 1)
self.failUnless(reg[0] is _Factory)
self.assertEqual(comp._subscription_registrations,
[((ibar,), ifoo, _blank, _Factory, _info)])
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, SubscriptionRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, _blank)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _Factory)
def test_registerSubscriptionAdapter_wo_event(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_blank = _u('')
_info = _u('info')
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerSubscriptionAdapter(_factory, (ibar,), ifoo,
info=_info, event=False)
self.assertEqual(len(_events), 0)
def test_registeredSubscriptionAdapters_empty(self):
comp = self._makeOne()
self.assertEqual(list(comp.registeredSubscriptionAdapters()), [])
def test_registeredSubscriptionAdapters_notempty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
from zope.interface.registry import SubscriptionRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
        ibar = IFoo('IBar')
_info = _u('info')
_blank = _u('')
class _Factory(object):
def __init__(self, context):
pass
comp = self._makeOne()
comp.registerSubscriptionAdapter(_Factory, (ibar,), ifoo, info=_info)
comp.registerSubscriptionAdapter(_Factory, (ibar,), ifoo, info=_info)
reg = list(comp.registeredSubscriptionAdapters())
self.assertEqual(len(reg), 2)
self.failUnless(isinstance(reg[0], SubscriptionRegistration))
self.failUnless(reg[0].registry is comp)
self.failUnless(reg[0].provided is ifoo)
self.assertEqual(reg[0].required, (ibar,))
self.assertEqual(reg[0].name, _blank)
self.failUnless(reg[0].info is _info)
self.failUnless(reg[0].factory is _Factory)
self.failUnless(isinstance(reg[1], SubscriptionRegistration))
self.failUnless(reg[1].registry is comp)
self.failUnless(reg[1].provided is ifoo)
self.assertEqual(reg[1].required, (ibar,))
self.assertEqual(reg[1].name, _blank)
self.failUnless(reg[1].info is _info)
self.failUnless(reg[1].factory is _Factory)
def test_unregisterSubscriptionAdapter_w_nonblank_name(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
_nonblank = _u('nonblank')
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterSubscriptionAdapter,
required=ifoo, provided=ibar, name=_nonblank)
def test_unregisterSubscriptionAdapter_neither_factory_nor_provided(self):
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterSubscriptionAdapter,
factory=None, provided=None)
def test_unregisterSubscriptionAdapter_neither_factory_nor_required(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterSubscriptionAdapter,
factory=None, provided=ifoo, required=None)
def test_unregisterSubscriptionAdapter_miss(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterSubscriptionAdapter(_Factory, (ibar,), ifoo)
self.failIf(unreg)
self.failIf(_events)
def test_unregisterSubscriptionAdapter_hit_wo_factory(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import SubscriptionRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerSubscriptionAdapter(_Factory, (ibar,), ifoo)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterSubscriptionAdapter(None, (ibar,), ifoo)
self.failUnless(unreg)
self.failIf(comp.adapters._subscribers)
self.failIf(comp._subscription_registrations)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, SubscriptionRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is None)
def test_unregisterSubscriptionAdapter_hit_w_factory(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import SubscriptionRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerSubscriptionAdapter(_Factory, (ibar,), ifoo)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterSubscriptionAdapter(_Factory, (ibar,), ifoo)
self.failUnless(unreg)
self.failIf(comp.adapters._subscribers)
self.failIf(comp._subscription_registrations)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, SubscriptionRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is _Factory)
def test_unregisterSubscriptionAdapter_wo_explicit_provided(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
from zope.interface.interfaces import Unregistered
from zope.interface.registry import SubscriptionRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
@implementer(ifoo)
class _Factory(object):
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerSubscriptionAdapter(_Factory, (ibar,), ifoo)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterSubscriptionAdapter(_Factory, (ibar,))
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, SubscriptionRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is _Factory)
def test_unregisterSubscriptionAdapter_wo_explicit_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import SubscriptionRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
__component_adapts__ = (ibar,)
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerSubscriptionAdapter(_Factory, (ibar,), ifoo)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterSubscriptionAdapter(_Factory, provided=ifoo)
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, SubscriptionRegistration))
self.failUnless(event.object.registry is comp)
self.failUnless(event.object.provided is ifoo)
self.assertEqual(event.object.required, (ibar,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is _Factory)
def test_subscribers_empty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
comp = self._makeOne()
@implementer(ibar)
class Bar(object):
pass
bar = Bar()
self.assertEqual(list(comp.subscribers((bar,), ifoo)), [])
def test_subscribers_non_empty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Factory(object):
__component_adapts__ = (ibar,)
def __init__(self, context):
self._context = context
class _Derived(_Factory):
pass
comp = self._makeOne()
comp.registerSubscriptionAdapter(_Factory, (ibar,), ifoo)
comp.registerSubscriptionAdapter(_Derived, (ibar,), ifoo)
@implementer(ibar)
class Bar(object):
pass
bar = Bar()
subscribers = comp.subscribers((bar,), ifoo)
def _klassname(x):
return x.__class__.__name__
subscribers = sorted(subscribers, key=_klassname)
self.assertEqual(len(subscribers), 2)
self.failUnless(isinstance(subscribers[0], _Derived))
self.failUnless(isinstance(subscribers[1], _Factory))
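    # Illustrative sketch (not part of the original suite): every subscription
    # adapter registered for (IBar,) -> IFoo is instantiated when subscribers()
    # is called with an IBar provider, so multiple registrations all fire.
    def _example_subscription_adapters(self):
        from zope.interface.interface import InterfaceClass
        from zope.interface.declarations import implementer
        IFoo = InterfaceClass('IFoo')
        IBar = InterfaceClass('IBar')
        class _First(object):
            def __init__(self, context):
                self.context = context
        class _Second(object):
            def __init__(self, context):
                self.context = context
        @implementer(IBar)
        class _Bar(object):
            pass
        comp = self._makeOne()
        comp.registerSubscriptionAdapter(_First, (IBar,), IFoo)
        comp.registerSubscriptionAdapter(_Second, (IBar,), IFoo)
        results = comp.subscribers((_Bar(),), IFoo)
        assert set(type(r) for r in results) == set([_First, _Second])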
def test_registerHandler_w_nonblank_name(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_nonblank = _u('nonblank')
comp = self._makeOne()
def _factory(context):
pass
self.assertRaises(TypeError, comp.registerHandler, _factory,
required=ifoo, name=_nonblank)
def test_registerHandler_w_explicit_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import HandlerRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_blank = _u('')
_info = _u('info')
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerHandler(_factory, (ifoo,), info=_info)
reg = comp.adapters._subscribers[1][ifoo][None][_blank]
self.assertEqual(len(reg), 1)
self.failUnless(reg[0] is _factory)
self.assertEqual(comp._handler_registrations,
[((ifoo,), _blank, _factory, _info)])
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, HandlerRegistration))
self.failUnless(event.object.registry is comp)
self.assertEqual(event.object.required, (ifoo,))
self.assertEqual(event.object.name, _blank)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _factory)
def test_registerHandler_wo_explicit_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Registered
from zope.interface.registry import HandlerRegistration
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_info = _u('info')
_blank = _u('')
class _Factory(object):
__component_adapts__ = (ifoo,)
def __init__(self, context):
self._context = context
comp = self._makeOne()
_monkey, _events = self._wrapEvents()
with _monkey:
comp.registerHandler(_Factory, info=_info)
reg = comp.adapters._subscribers[1][ifoo][None][_blank]
self.assertEqual(len(reg), 1)
self.failUnless(reg[0] is _Factory)
self.assertEqual(comp._handler_registrations,
[((ifoo,), _blank, _Factory, _info)])
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Registered))
self.failUnless(isinstance(event.object, HandlerRegistration))
self.failUnless(event.object.registry is comp)
self.assertEqual(event.object.required, (ifoo,))
self.assertEqual(event.object.name, _blank)
self.failUnless(event.object.info is _info)
self.failUnless(event.object.factory is _Factory)
def test_registeredHandlers_empty(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
comp = self._makeOne()
self.failIf(list(comp.registeredHandlers()))
def test_registeredHandlers_non_empty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.registry import HandlerRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
def _factory1(context):
pass
def _factory2(context):
pass
comp = self._makeOne()
comp.registerHandler(_factory1, (ifoo,))
comp.registerHandler(_factory2, (ifoo,))
def _factory_name(x):
return x.factory.__code__.co_name
subscribers = sorted(comp.registeredHandlers(), key=_factory_name)
self.assertEqual(len(subscribers), 2)
self.failUnless(isinstance(subscribers[0], HandlerRegistration))
self.assertEqual(subscribers[0].required, (ifoo,))
self.assertEqual(subscribers[0].name, '')
self.assertEqual(subscribers[0].factory, _factory1)
self.assertEqual(subscribers[0].info, '')
self.failUnless(isinstance(subscribers[1], HandlerRegistration))
self.assertEqual(subscribers[1].required, (ifoo,))
self.assertEqual(subscribers[1].name, '')
self.assertEqual(subscribers[1].factory, _factory2)
self.assertEqual(subscribers[1].info, '')
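    # Illustrative sketch (not part of the original suite): handlers are
    # subscribers with no provided interface; handle() simply calls every
    # registered handler whose required spec matches the given object.
    def _example_register_and_fire_handler(self):
        from zope.interface.interface import InterfaceClass
        from zope.interface.declarations import implementer
        IFoo = InterfaceClass('IFoo')
        _seen = []
        def _handler(event):
            _seen.append(event)
        @implementer(IFoo)
        class _Event(object):
            pass
        comp = self._makeOne()
        comp.registerHandler(_handler, (IFoo,))
        event = _Event()
        comp.handle(event)
        assert _seen == [event]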
def test_unregisterHandler_w_nonblank_name(self):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_nonblank = _u('nonblank')
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterHandler,
required=(ifoo,), name=_nonblank)
def test_unregisterHandler_neither_factory_nor_required(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
self.assertRaises(TypeError, comp.unregisterHandler)
def test_unregisterHandler_miss(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
unreg = comp.unregisterHandler(required=(ifoo,))
self.failIf(unreg)
def test_unregisterHandler_hit_w_factory_and_explicit_provided(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import HandlerRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
comp.registerHandler(_factory, (ifoo,))
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterHandler(_factory, (ifoo,))
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, HandlerRegistration))
self.failUnless(event.object.registry is comp)
self.assertEqual(event.object.required, (ifoo,))
self.assertEqual(event.object.name, '')
self.failUnless(event.object.factory is _factory)
def test_unregisterHandler_hit_w_only_explicit_provided(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import HandlerRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
_to_reg = object()
def _factory(context):
return _to_reg
comp = self._makeOne()
comp.registerHandler(_factory, (ifoo,))
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterHandler(required=(ifoo,))
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, HandlerRegistration))
self.failUnless(event.object.registry is comp)
self.assertEqual(event.object.required, (ifoo,))
self.assertEqual(event.object.name, '')
self.failUnless(event.object.factory is None)
def test_unregisterHandler_wo_explicit_required(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.interfaces import Unregistered
from zope.interface.registry import HandlerRegistration
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
class _Factory(object):
__component_adapts__ = (ifoo,)
def __init__(self, context):
self._context = context
comp = self._makeOne()
comp.registerHandler(_Factory)
_monkey, _events = self._wrapEvents()
with _monkey:
unreg = comp.unregisterHandler(_Factory)
self.failUnless(unreg)
self.assertEqual(len(_events), 1)
args, kw = _events[0]
event, = args
self.assertEqual(kw, {})
self.failUnless(isinstance(event, Unregistered))
self.failUnless(isinstance(event.object, HandlerRegistration))
self.failUnless(event.object.registry is comp)
self.assertEqual(event.object.required, (ifoo,))
self.assertEqual(event.object.name, '')
self.assertEqual(event.object.info, '')
self.failUnless(event.object.factory is _Factory)
def test_handle_empty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
comp = self._makeOne()
@implementer(ifoo)
class Bar(object):
pass
bar = Bar()
comp.handle((bar,)) # doesn't raise
def test_handle_non_empty(self):
from zope.interface.declarations import InterfaceClass
from zope.interface.declarations import implementer
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
_called_1 = []
def _factory_1(context):
_called_1.append(context)
_called_2 = []
def _factory_2(context):
_called_2.append(context)
comp = self._makeOne()
comp.registerHandler(_factory_1, (ifoo,))
comp.registerHandler(_factory_2, (ifoo,))
@implementer(ifoo)
class Bar(object):
pass
bar = Bar()
comp.handle(bar)
self.assertEqual(_called_1, [bar])
self.assertEqual(_called_2, [bar])
# Test _getUtilityProvided, _getAdapterProvided, _getAdapterRequired via their
# callers (Component.registerUtility, Component.registerAdapter).
class UtilityRegistrationTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.registry import UtilityRegistration
return UtilityRegistration
def _makeOne(self, component=None, factory=None):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
class _Registry(object):
def __repr__(self):
return '_REGISTRY'
registry = _Registry()
name = _u('name')
doc = 'DOCSTRING'
klass = self._getTargetClass()
return (klass(registry, ifoo, name, component, doc, factory),
registry,
name,
)
def test_class_conforms_to_IUtilityRegistration(self):
from zope.interface.verify import verifyClass
from zope.interface.interfaces import IUtilityRegistration
verifyClass(IUtilityRegistration, self._getTargetClass())
def test_instance_conforms_to_IUtilityRegistration(self):
from zope.interface.verify import verifyObject
from zope.interface.interfaces import IUtilityRegistration
ur, _, _ = self._makeOne()
verifyObject(IUtilityRegistration, ur)
def test___repr__(self):
class _Component(object):
__name__ = 'TEST'
_component = _Component()
ur, _registry, _name = self._makeOne(_component)
self.assertEqual(repr(ur),
"UtilityRegistration(_REGISTRY, IFoo, %r, TEST, None, 'DOCSTRING')"
% (_name))
def test___repr___provided_wo_name(self):
class _Component(object):
def __repr__(self):
return 'TEST'
_component = _Component()
ur, _registry, _name = self._makeOne(_component)
ur.provided = object()
self.assertEqual(repr(ur),
"UtilityRegistration(_REGISTRY, None, %r, TEST, None, 'DOCSTRING')"
% (_name))
def test___repr___component_wo_name(self):
class _Component(object):
def __repr__(self):
return 'TEST'
_component = _Component()
ur, _registry, _name = self._makeOne(_component)
ur.provided = object()
self.assertEqual(repr(ur),
"UtilityRegistration(_REGISTRY, None, %r, TEST, None, 'DOCSTRING')"
% (_name))
def test___hash__(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
self.assertEqual(ur.__hash__(), id(ur))
def test___eq___identity(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
self.failUnless(ur == ur)
def test___eq___hit(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component)
self.failUnless(ur == ur2)
def test___eq___miss(self):
_component = object()
_component2 = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component2)
self.failIf(ur == ur2)
def test___ne___identity(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
self.failIf(ur != ur)
def test___ne___hit(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component)
self.failIf(ur != ur2)
def test___ne___miss(self):
_component = object()
_component2 = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component2)
self.failUnless(ur != ur2)
def test___lt___identity(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
self.failIf(ur < ur)
def test___lt___hit(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component)
self.failIf(ur < ur2)
def test___lt___miss(self):
_component = object()
_component2 = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component2)
ur2.name = _name + '2'
self.failUnless(ur < ur2)
def test___le___identity(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
self.failUnless(ur <= ur)
def test___le___hit(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component)
self.failUnless(ur <= ur2)
def test___le___miss(self):
_component = object()
_component2 = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component2)
ur2.name = _name + '2'
self.failUnless(ur <= ur2)
def test___gt___identity(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
self.failIf(ur > ur)
def test___gt___hit(self):
_component = object()
_component2 = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component2)
ur2.name = _name + '2'
self.failUnless(ur2 > ur)
def test___gt___miss(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component)
self.failIf(ur2 > ur)
def test___ge___identity(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
self.failUnless(ur >= ur)
def test___ge___miss(self):
_component = object()
_component2 = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component2)
ur2.name = _name + '2'
self.failIf(ur >= ur2)
def test___ge___hit(self):
_component = object()
ur, _registry, _name = self._makeOne(_component)
ur2, _, _ = self._makeOne(_component)
ur2.name = _name + '2'
self.failUnless(ur2 >= ur)
class AdapterRegistrationTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.registry import AdapterRegistration
return AdapterRegistration
def _makeOne(self, component=None):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Registry(object):
def __repr__(self):
return '_REGISTRY'
registry = _Registry()
name = _u('name')
doc = 'DOCSTRING'
klass = self._getTargetClass()
return (klass(registry, (ibar,), ifoo, name, component, doc),
registry,
name,
)
def test_class_conforms_to_IAdapterRegistration(self):
from zope.interface.verify import verifyClass
from zope.interface.interfaces import IAdapterRegistration
verifyClass(IAdapterRegistration, self._getTargetClass())
def test_instance_conforms_to_IAdapterRegistration(self):
from zope.interface.verify import verifyObject
from zope.interface.interfaces import IAdapterRegistration
ar, _, _ = self._makeOne()
verifyObject(IAdapterRegistration, ar)
def test___repr__(self):
class _Component(object):
__name__ = 'TEST'
_component = _Component()
ar, _registry, _name = self._makeOne(_component)
self.assertEqual(repr(ar),
("AdapterRegistration(_REGISTRY, [IBar], IFoo, %r, TEST, "
+ "'DOCSTRING')") % (_name))
def test___repr___provided_wo_name(self):
class _Component(object):
def __repr__(self):
return 'TEST'
_component = _Component()
ar, _registry, _name = self._makeOne(_component)
ar.provided = object()
self.assertEqual(repr(ar),
("AdapterRegistration(_REGISTRY, [IBar], None, %r, TEST, "
+ "'DOCSTRING')") % (_name))
def test___repr___component_wo_name(self):
class _Component(object):
def __repr__(self):
return 'TEST'
_component = _Component()
ar, _registry, _name = self._makeOne(_component)
ar.provided = object()
self.assertEqual(repr(ar),
("AdapterRegistration(_REGISTRY, [IBar], None, %r, TEST, "
+ "'DOCSTRING')") % (_name))
def test___hash__(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
self.assertEqual(ar.__hash__(), id(ar))
def test___eq___identity(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
self.failUnless(ar == ar)
def test___eq___hit(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component)
self.failUnless(ar == ar2)
def test___eq___miss(self):
_component = object()
_component2 = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component2)
self.failIf(ar == ar2)
def test___ne___identity(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
self.failIf(ar != ar)
def test___ne___miss(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component)
self.failIf(ar != ar2)
def test___ne___hit_component(self):
_component = object()
_component2 = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component2)
self.failUnless(ar != ar2)
def test___ne___hit_provided(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ibaz = IFoo('IBaz')
_component = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component)
ar2.provided = ibaz
self.failUnless(ar != ar2)
def test___ne___hit_required(self):
from zope.interface.declarations import InterfaceClass
class IFoo(InterfaceClass):
pass
ibaz = IFoo('IBaz')
_component = object()
_component2 = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component2)
ar2.required = (ibaz,)
self.failUnless(ar != ar2)
def test___lt___identity(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
self.failIf(ar < ar)
def test___lt___hit(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component)
self.failIf(ar < ar2)
def test___lt___miss(self):
_component = object()
_component2 = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component2)
ar2.name = _name + '2'
self.failUnless(ar < ar2)
def test___le___identity(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
self.failUnless(ar <= ar)
def test___le___hit(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component)
self.failUnless(ar <= ar2)
def test___le___miss(self):
_component = object()
_component2 = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component2)
ar2.name = _name + '2'
self.failUnless(ar <= ar2)
def test___gt___identity(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
self.failIf(ar > ar)
def test___gt___hit(self):
_component = object()
_component2 = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component2)
ar2.name = _name + '2'
self.failUnless(ar2 > ar)
def test___gt___miss(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component)
self.failIf(ar2 > ar)
def test___ge___identity(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
self.failUnless(ar >= ar)
def test___ge___miss(self):
_component = object()
_component2 = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component2)
ar2.name = _name + '2'
self.failIf(ar >= ar2)
def test___ge___hit(self):
_component = object()
ar, _registry, _name = self._makeOne(_component)
ar2, _, _ = self._makeOne(_component)
ar2.name = _name + '2'
self.failUnless(ar2 >= ar)
class SubscriptionRegistrationTests(unittest.TestCase):
def _getTargetClass(self):
from zope.interface.registry import SubscriptionRegistration
return SubscriptionRegistration
def _makeOne(self, component=None):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
ibar = IFoo('IBar')
class _Registry(object):
def __repr__(self):
return '_REGISTRY'
registry = _Registry()
name = _u('name')
doc = 'DOCSTRING'
klass = self._getTargetClass()
return (klass(registry, (ibar,), ifoo, name, component, doc),
registry,
name,
)
def test_class_conforms_to_ISubscriptionAdapterRegistration(self):
from zope.interface.verify import verifyClass
from zope.interface.interfaces import ISubscriptionAdapterRegistration
verifyClass(ISubscriptionAdapterRegistration, self._getTargetClass())
def test_instance_conforms_to_ISubscriptionAdapterRegistration(self):
from zope.interface.verify import verifyObject
from zope.interface.interfaces import ISubscriptionAdapterRegistration
sar, _, _ = self._makeOne()
verifyObject(ISubscriptionAdapterRegistration, sar)
class HandlerRegistrationTests(_SilencePy3Deprecations):
def _getTargetClass(self):
from zope.interface.registry import HandlerRegistration
return HandlerRegistration
def _makeOne(self, component=None):
from zope.interface.declarations import InterfaceClass
from zope.interface._compat import _u
class IFoo(InterfaceClass):
pass
ifoo = IFoo('IFoo')
class _Registry(object):
def __repr__(self):
return '_REGISTRY'
registry = _Registry()
name = _u('name')
doc = 'DOCSTRING'
klass = self._getTargetClass()
return (klass(registry, (ifoo,), name, component, doc),
registry,
name,
)
def test_class_conforms_to_IHandlerRegistration(self):
from zope.interface.verify import verifyClass
from zope.interface.interfaces import IHandlerRegistration
verifyClass(IHandlerRegistration, self._getTargetClass())
def test_instance_conforms_to_IHandlerRegistration(self):
from zope.interface.verify import verifyObject
from zope.interface.interfaces import IHandlerRegistration
hr, _, _ = self._makeOne()
verifyObject(IHandlerRegistration, hr)
def test_properties(self):
def _factory(context):
pass
hr, _, _ = self._makeOne(_factory)
self.failUnless(hr.handler is _factory)
self.failUnless(hr.factory is hr.handler)
self.failUnless(hr.provided is None)
def test___repr___factory_w_name(self):
class _Factory(object):
__name__ = 'TEST'
hr, _registry, _name = self._makeOne(_Factory())
self.assertEqual(repr(hr),
("HandlerRegistration(_REGISTRY, [IFoo], %r, TEST, "
+ "'DOCSTRING')") % (_name))
def test___repr___factory_wo_name(self):
class _Factory(object):
def __repr__(self):
return 'TEST'
hr, _registry, _name = self._makeOne(_Factory())
self.assertEqual(repr(hr),
("HandlerRegistration(_REGISTRY, [IFoo], %r, TEST, "
+ "'DOCSTRING')") % (_name))
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
def __init__(self, module, **kw):
self.module = module
self.to_restore = dict([(key, getattr(module, key)) for key in kw])
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
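# Example usage (illustrative; ``some_module`` and ``fake_notify`` are placeholders,
# not names from this module):
#
#   with _Monkey(some_module, notify=fake_notify):
#       ...  # code under test sees some_module.notify == fake_notify
#   # the original attribute is restored when the with-block exits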
def test_suite():
return unittest.TestSuite((
unittest.makeSuite(ComponentsTests),
unittest.makeSuite(UtilityRegistrationTests),
unittest.makeSuite(AdapterRegistrationTests),
unittest.makeSuite(SubscriptionRegistrationTests),
        unittest.makeSuite(HandlerRegistrationTests),
))
| 39.464766 | 80 | 0.630783 |
f71806245033ff31b7f8e029e27f81e487b11834 | 20,468 | py | Python | cadnano/views/pathview/tools/pathselection.py | sherwoodyao/cadnano2.5 | ce6ff019b88ee7728de947bd86b35861cf57848d | [
"BSD-3-Clause"
] | 69 | 2015-01-13T02:54:40.000Z | 2022-03-27T14:25:51.000Z | cadnano/views/pathview/tools/pathselection.py | scholer/cadnano2.5 | ce6ff019b88ee7728de947bd86b35861cf57848d | [
"BSD-3-Clause"
] | 127 | 2015-01-01T06:26:34.000Z | 2022-03-02T12:48:05.000Z | cadnano/views/pathview/tools/pathselection.py | scholer/cadnano2.5 | ce6ff019b88ee7728de947bd86b35861cf57848d | [
"BSD-3-Clause"
] | 48 | 2015-01-22T19:57:49.000Z | 2022-03-27T14:27:53.000Z | # -*- coding: utf-8 -*-
import logging
from math import floor
from PyQt5.QtCore import (
QPointF,
QRectF,
Qt
)
from PyQt5.QtGui import (
QPainterPath,
QKeyEvent,
QMouseEvent
)
from PyQt5.QtWidgets import (
QGraphicsItem,
QGraphicsItemGroup,
QGraphicsPathItem,
QGraphicsSceneMouseEvent,
)
from cadnano.gui.palette import getPenObj
from cadnano.views.pathview import pathstyles as styles
from cadnano.views.pathview import (
PathRootItemT,
)
from cadnano.cntypes import (
Vec2T,
DocT
)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
class SelectionItemGroup(QGraphicsItemGroup):
"""SelectionItemGroup
Attributes:
getR (TYPE): Description
selectionbox (TYPE): SelectionBox
translateR (TYPE): Description
viewroot: Description
"""
def __init__(self, boxtype: QGraphicsItem,
constraint: str,
viewroot: PathRootItemT):
"""
Args:
boxtype: :class:`EndpointHandleSelectionBox` or
:class:`VirtualHelixHandleSelectionBox` instance
constraint: ``x`` or ``y``. Default to ``y`` (up and down)
viewroot: view root item and object parent
"""
super(SelectionItemGroup, self).__init__(viewroot)
self.viewroot: PathRootItemT = viewroot
self.setFiltersChildEvents(True)
# LOOK at Qt Source for deprecated code to replace this behavior
# self.setHandlesChildEvents(True) # commented out NC
self.setFlag(QGraphicsItem.ItemIsSelectable)
self.setFlag(QGraphicsItem.ItemIsFocusable) # for keyPressEvents
self.setFlag(QGraphicsItem.ItemHasNoContents)
self._rect = QRectF()
self._PEN = getPenObj(styles.BLUE_STROKE,
styles.PATH_SELECTBOX_STROKE_WIDTH)
self.selectionbox = boxtype(self)
self._drag_enable = False
self._dragged = False
self._r0 = 0 # save original mousedown
self._r = 0 # latest position for moving
# self._lastKid = 0
# this keeps track of mousePressEvents within the class
        # to aid in intelligently removing items from the group
self._added_to_press_list = False
self._pending_to_add_dict = {}
if constraint == 'y':
self.getR = self.selectionbox.getY
self.translateR = self.selectionbox.translateY
else:
self.getR = self.selectionbox.getX
self.translateR = self.selectionbox.translateX
self._normal_select = True
self.setZValue(styles.ZPATHSELECTION)
# end def
# def paint(self, painter, option, widget):
# painter.drawRect(self.boundingRect())
# # end def
def pendToAdd(self, item):
"""
Args:
item (TYPE): Description
"""
self._pending_to_add_dict[item] = True
# end def
def isPending(self, item):
"""
Args:
item (TYPE): Description
Returns:
TYPE: Description
"""
return item in self._pending_to_add_dict
# end def
def document(self) -> DocT:
"""
Returns:
:class:`Document`
"""
return self.viewroot.document()
# end def
def pendToRemove(self, item):
"""
Args:
item (TYPE): Description
"""
if item in self._pending_to_add_dict:
del self._pending_to_add_dict[item]
# end def
def setNormalSelect(self, bool_val: bool):
"""
Args:
bool_val: Description
"""
self._normal_select = bool_val
# end def
def isNormalSelect(self) -> bool:
"""
Returns:
is it normal select?
"""
return self._normal_select
# end def
def processPendingToAddList(self):
"""
Adds to the local selection and the document if required
"""
doc = self.document()
p2add = self._pending_to_add_dict
# logger.debug("processPendingToAddList")
if len(p2add) > 0:
plist = list(self._pending_to_add_dict.keys())
for item in plist:
if p2add[item]:
p2add[item] = False
# logger.debug("just checking1", item, item.group(), item.parentItem())
self.addToGroup(item)
item.modelSelect(doc)
# end for
# logger.debug('finished')
self._pending_to_add_dict = {}
doc.updateStrandSelection()
# end def
def selectionLock(self):
"""
Returns:
TYPE: Description
"""
return self.viewroot.selectionLock()
# end def
def setSelectionLock(self, selection_group):
"""
Args:
selection_group (TYPE): Description
"""
self.viewroot.setSelectionLock(selection_group)
# end def
def keyPressEvent(self, event: QKeyEvent):
"""
Must intercept invalid input events. Make changes here
Args:
event (TYPE): Description
"""
key = event.key()
if key in [Qt.Key_Backspace, Qt.Key_Delete]:
self.selectionbox.deleteSelection()
self.clearSelection(False)
return QGraphicsItemGroup.keyPressEvent(self, event)
else:
return QGraphicsItemGroup.keyPressEvent(self, event)
# end def
def mousePressEvent(self, event: QGraphicsSceneMouseEvent):
"""Handler for user mouse press.
Args:
event: Contains item, scene, and screen
coordinates of the the event, and previous event.
"""
# self.show()
if event.button() != Qt.LeftButton:
return QGraphicsItemGroup.mousePressEvent(self, event)
else:
self._drag_enable = True
# required to get the itemChanged event to work
# correctly for this
self.setSelected(True)
# self.selectionbox.resetTransform()
self.selectionbox.resetPosition()
self.selectionbox.refreshPath()
# self.selectionbox.resetTransform()
self.selectionbox.resetPosition()
self.selectionbox.show()
# for some reason we need to skip the first mouseMoveEvent
self._dragged = False
if self._added_to_press_list is False:
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return QGraphicsItemGroup.mousePressEvent(self, event)
# end def
def mouseMoveEvent(self, event: QGraphicsSceneMouseEvent):
"""
Args:
event: Description
"""
if self._drag_enable is True:
# map the item to the scene coordinates
# to help keep coordinates uniform
rf = self.getR(self.mapFromScene(QPointF(event.scenePos())))
# for some reason we need to skip the first mouseMoveEvent
if self._dragged is False:
self._dragged = True
self._r0 = rf
# end if
else:
delta = self.selectionbox.delta(rf, self._r0)
self.translateR(delta)
# logger.debug('mouse move path selectionbox', delta, rf, self._r0)
# end else
self._r = rf
# end if
else:
QGraphicsItemGroup.mouseMoveEvent(self, event)
# end else
# end def
def customMouseRelease(self, event: QMouseEvent):
"""
Args:
event: Description
"""
self.selectionbox.setParentItem(self.viewroot)
self.selectionbox.hide()
self.selectionbox.resetTransform()
self._drag_enable = False
# now do stuff
if not (self._r0 == 0 and self._r == 0):
modifiers = event.modifiers()
self.selectionbox.processSelectedItems(self._r0, self._r, modifiers)
# end if
self._r0 = 0 # reset
self._r = 0 # reset
self.setFocus() # needed to get keyPresses post a move
self._added_to_press_list = False
# end def
def resetSelection(self):
"""Summary
Returns:
TYPE: Description
"""
self._pending_to_add_dict = {}
self._added_to_press_list = False
self.clearSelection(False)
self.setSelectionLock(None)
self.selectionbox.setParentItem(self.viewroot)
self.setParentItem(self.viewroot)
# end def
def clearSelection(self, value):
"""value is for keyPressEvents
Arguments:
value (QVariant): resolves in Python as an integer
"""
if value == False: # noqa
self.selectionbox.hide()
self.selectionbox.resetPosition()
self.removeSelectedItems()
self.viewroot.setSelectionLock(None)
self.clearFocus() # this is to disable delete keyPressEvents
self.prepareGeometryChange()
self._rect.setWidth(0)
# self._rect = QRectF()
# end if
else:
self.setFocus() # this is to get delete keyPressEvents
self.update(self.boundingRect())
# end def
def itemChange(self, change, value):
"""docstring for itemChange
Arguments:
change (GraphicsItemChange): see http://doc.qt.io/qt-5/qgraphicsitem.html#GraphicsItemChange-enum
value (QVariant): resolves in Python as an integer
"""
# logger.debug("ps itemChange")
if change == QGraphicsItem.ItemSelectedChange:
# logger.debug("isc", value)
if value == False: # noqa
self.clearSelection(False)
return False
else:
return True
elif change == QGraphicsItem.ItemChildAddedChange:
# logger.debug("icac")
if self._added_to_press_list is False:
# logger.debug("kid added")
self.setFocus() # this is to get delete keyPressEvents
self.selectionbox.boxParent()
# self.setParentItem(self.selectionbox.boxParent())
self._added_to_press_list = True
self.scene().views()[0].addToPressList(self)
return
return QGraphicsItemGroup.itemChange(self, change, value)
# end def
def removeChild(self, child):
"""
remove only the child and ask it to
restore it's original parent
Args:
child (TYPE): Description
"""
doc = self.document()
self.removeFromGroup(child)
child.modelDeselect(doc)
# end def
def removeSelectedItems(self):
"""docstring for removeSelectedItems
"""
doc = self.document()
for item in self.childItems():
self.removeFromGroup(item)
item.modelDeselect(doc)
# end for
doc.updateStrandSelection()
# end def
def setBoundingRect(self, rect):
"""Summary
Args:
rect (TYPE): Description
Returns:
TYPE: Description
"""
self.prepareGeometryChange()
self._rect = rect
# end def
def boundingRect(self):
"""Summary
Returns:
TYPE: Description
"""
return self._rect
# end class
class VirtualHelixHandleSelectionBox(QGraphicsPathItem):
"""
docstring for VirtualHelixHandleSelectionBox
"""
_HELIX_HEIGHT = styles.PATH_HELIX_HEIGHT + styles.PATH_HELIX_PADDING
_RADIUS = styles.VIRTUALHELIXHANDLEITEM_RADIUS
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.BLUE_STROKE, _PEN_WIDTH)
def __init__(self, item_group: SelectionItemGroup):
"""
The item_group.parentItem() is expected to be a partItem
Args:
item_group (TYPE): Description
"""
super(VirtualHelixHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = None
self._pos0 = QPointF()
# end def
def getY(self, pos):
"""Summary
Args:
pos (TYPE): Description
Returns:
TYPE: Description
"""
pos = self._item_group.mapToScene(QPointF(pos))
return pos.y()
# end def
def translateY(self, delta):
"""Summary
Args:
delta (TYPE): Description
Returns:
TYPE: Description
"""
self.setY(delta)
# end def
def refreshPath(self):
"""Summary
Returns:
TYPE: Description
"""
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
# end def
def painterPath(self):
"""Summary
Returns:
TYPE: Description
"""
i_g = self._item_group
# the childrenBoundingRect is necessary to get this to work
rect = self.mapRectFromItem(i_g, i_g.childrenBoundingRect())
radius = self._RADIUS
path = QPainterPath()
path.addRoundedRect(rect, radius, radius)
path.moveTo(rect.right(), rect.center().y())
path.lineTo(rect.right() + radius / 2, rect.center().y())
return path
# end def
def processSelectedItems(self, r_start, r_end, modifiers):
"""docstring for processSelectedItems
Args:
r_start (TYPE): Description
r_end (TYPE): Description
modifiers (TYPE): Description
"""
margin = styles.VIRTUALHELIXHANDLEITEM_RADIUS
delta = (r_end - r_start) # r delta
mid_height = (self.boundingRect().height()) / 2 - margin
helix_height = self._HELIX_HEIGHT
if abs(delta) < mid_height: # move is too short for reordering
return
if delta > 0: # moved down, delta is positive
indexDelta = int((delta - mid_height) / helix_height)
else: # moved up, delta is negative
indexDelta = int((delta + mid_height) / helix_height)
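        # Illustrative numbers (assumed, not taken from styles): if helix_height were
        # 30 px and mid_height 12 px, a downward drag of delta == 75 px would give
        # indexDelta == int((75 - 12) / 30) == 2, i.e. move the selection down two rows.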
# sort on y to determine the extremes of the selection group
items = sorted(self._item_group.childItems(), key=lambda vhhi: vhhi.y())
part_item = items[0].partItem()
part_item.reorderHelices([item.idNum() for item in items],
indexDelta)
# part_item.reorderHelices(items[0].idNum(),
# items[-1].idNum(),
# indexDelta)
part_item.updateStatusBar("")
# end def
def boxParent(self):
"""Summary
Returns:
TYPE: Description
"""
temp = self._item_group.childItems()[0].partItem()
self.setParentItem(temp)
return temp
# end def
def deleteSelection(self):
"""
        Delete the selected virtual helices. This operates outside of the document's
        selection machinery, since virtual helices are not actually selected in the model.
"""
vh_handle_items = self._item_group.childItems()
u_s = self._item_group.document().undoStack()
u_s.beginMacro("delete Virtual Helices")
for vhhi in vh_handle_items:
part = vhhi.part()
part.removeVirtualHelix(vhhi.idNum())
u_s.endMacro()
# end def
def bounds(self):
"""Summary
Returns:
TYPE: Description
"""
return self._bounds
# end def
def delta(self, yf, y0):
"""Summary
Args:
yf (TYPE): Description
y0 (TYPE): Description
Returns:
TYPE: Description
"""
return yf - y0
# end def
def resetPosition(self):
"""Summary
Returns:
TYPE: Description
"""
self.setPos(self._pos0)
# end def
# end class
class EndpointHandleSelectionBox(QGraphicsPathItem):
"""Summary
"""
_PEN_WIDTH = styles.SELECTIONBOX_PEN_WIDTH
_BOX_PEN = getPenObj(styles.SELECTED_COLOR, _PEN_WIDTH)
_BASE_WIDTH = styles.PATH_BASE_WIDTH
def __init__(self, item_group: SelectionItemGroup):
"""The item_group.parentItem() is expected to be a partItem
Args:
item_group: Description
"""
super(EndpointHandleSelectionBox, self).__init__(item_group.parentItem())
self._item_group = item_group
self._rect = item_group.boundingRect()
self.hide()
self.setPen(self._BOX_PEN)
self.setZValue(styles.ZPATHSELECTION)
self._bounds = (0, 0)
self._pos0 = QPointF()
# end def
def getX(self, pos: QPointF) -> float:
"""
Args:
pos: Description
Returns:
``x`` position
"""
return pos.x()
# end def
def translateX(self, delta: float):
"""
Args:
delta: Description
"""
children = self._item_group.childItems()
if children:
p_i = children[0].partItem()
str = "+%d" % delta if delta >= 0 else "%d" % delta
p_i.updateStatusBar(str)
self.setX(self._BASE_WIDTH * delta)
# end def
def resetPosition(self):
"""
"""
self.setPos(self._pos0)
def delta(self, xf: float, x0: float) -> float:
"""
Args:
xf: Description
x0: Description
Returns:
change distance
"""
bound_l, bound_h = self._bounds
delta = int(floor((xf - x0) / self._BASE_WIDTH))
if delta > 0 and delta > bound_h:
delta = bound_h
elif delta < 0 and abs(delta) > bound_l:
delta = -bound_l
return delta
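    # Worked example with assumed numbers (not the real style constants): if _BASE_WIDTH
    # were 10 px and the current selection bounds were (2, 5), then delta(86.0, 20.0)
    # computes floor(66 / 10) == 6, which is clamped down to the high bound, returning 5.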
def refreshPath(self):
"""
"""
temp_low, temp_high = self._item_group.viewroot.document().getSelectionBounds()
self._bounds = (temp_low, temp_high)
# logger.debug("rp:", self._bounds)
self.prepareGeometryChange()
self.setPath(self.painterPath())
self._pos0 = self.pos()
# end def
def painterPath(self) -> QPainterPath:
"""
Returns:
:class:`QPainterPath`
"""
bw = self._BASE_WIDTH
i_g = self._item_group
# the childrenBoundingRect is necessary to get this to work
rect_IG = i_g.childrenBoundingRect()
rect = self.mapRectFromItem(i_g, rect_IG)
if rect.width() < bw:
rect.adjust(-bw / 4, 0, bw / 2, 0)
path = QPainterPath()
path.addRect(rect)
self._item_group.setBoundingRect(rect_IG)
# path.addRoundedRect(rect, radius, radius)
# path.moveTo(rect.right(),\
# rect.center().y())
# path.lineTo(rect.right() + radius / 2,\
# rect.center().y())
return path
# end def
def processSelectedItems(self, r_start: float, r_end: float, modifiers):
"""
Args:
r_start: Description
r_end: Description
modifiers (TYPE): Description
"""
delta = self.delta(r_end, r_start)
# TODO reenable do_maximize?????
# if modifiers & Qt.AltModifier:
# do_maximize = True
# else:
# do_maximize = False
self._item_group.viewroot.document().resizeSelection(delta)
# end def
def deleteSelection(self):
"""Summary
Returns:
TYPE: Description
"""
self._item_group.document().deleteStrandSelection()
def boxParent(self) -> QGraphicsItem:
"""Get the parent :class:`ProxyParentItem`
Returns:
:class:`ProxyParentItem`
"""
temp = self._item_group.childItems()[0].partItem().proxy()
self.setParentItem(temp)
return temp
# end def
def bounds(self) -> Vec2T:
"""
Returns:
the bounds
"""
return self._bounds
# end def
# end class
| 28.467316 | 109 | 0.569572 |
f7180a3e45377a91711d6c8fa67895d8d860641f | 1,780 | py | Python | mlprocessors/consolecapture.py | flatironinstitute/mountaintools | d5680599381e0810c4aa5b309b9ef9ec7f2d1b25 | [
"Apache-2.0"
] | 2 | 2019-11-07T14:09:02.000Z | 2021-09-23T01:09:04.000Z | mountaintools/mlprocessors/consolecapture.py | flatironinstitute/spikeforest_old | d9470194dc906b949178b9c44d14aea57a1f6c27 | [
"Apache-2.0"
] | 13 | 2019-05-04T09:34:53.000Z | 2019-06-23T07:05:58.000Z | mountaintools/mlprocessors/consolecapture.py | flatironinstitute/spikeforest_old | d9470194dc906b949178b9c44d14aea57a1f6c27 | [
"Apache-2.0"
] | 1 | 2021-09-23T01:07:21.000Z | 2021-09-23T01:07:21.000Z | from typing import Any
import sys
import time
import os
import tempfile
class Logger2():
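    """Tee-like writer that forwards write() and flush() calls to two file-like objects."""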
def __init__(self, file1: Any, file2: Any):
self.file1 = file1
self.file2 = file2
def write(self, data: str) -> None:
self.file1.write(data)
self.file2.write(data)
def flush(self) -> None:
self.file1.flush()
self.file2.flush()
class ConsoleCapture():
def __init__(self):
self._console_out = ''
self._tmp_fname = None
self._file_handle = None
self._time_start = None
self._time_stop = None
self._original_stdout = sys.stdout
self._original_stderr = sys.stderr
def start_capturing(self) -> None:
self._tmp_fname = tempfile.mktemp(suffix='.txt')
self._file_handle = open(self._tmp_fname, 'w')
sys.stdout = Logger2(self._file_handle, self._original_stdout)
sys.stderr = Logger2(self._file_handle, self._original_stderr)
self._time_start = time.time()
def stop_capturing(self) -> None:
assert self._tmp_fname is not None
self._time_stop = time.time()
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._file_handle.close()
with open(self._tmp_fname, 'r') as f:
self._console_out = f.read()
os.unlink(self._tmp_fname)
def addToConsoleOut(self, txt: str) -> None:
self._file_handle.write(txt)
def runtimeInfo(self) -> dict:
assert self._time_start is not None
return dict(
start_time=self._time_start - 0,
end_time=self._time_stop - 0,
elapsed_sec=self._time_stop - self._time_start
)
def consoleOut(self) -> str:
return self._console_out
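# Example usage (illustrative sketch):
#
#     cc = ConsoleCapture()
#     cc.start_capturing()
#     print('hello')            # echoed to the real stdout and captured to a temp file
#     cc.stop_capturing()
#     cc.consoleOut()           # -> 'hello\n'
#     cc.runtimeInfo()          # -> dict with start_time, end_time, elapsed_sec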
| 28.709677 | 70 | 0.626404 |
f718393cecda836a590a6dc97b77a13ca4ce20f5 | 70,177 | py | Python | cathpy/core/align.py | shouldsee/cathpy | 5f7fa1322434b2d254f0158c5840f029b12dbafe | [
"MIT"
] | null | null | null | cathpy/core/align.py | shouldsee/cathpy | 5f7fa1322434b2d254f0158c5840f029b12dbafe | [
"MIT"
] | null | null | null | cathpy/core/align.py | shouldsee/cathpy | 5f7fa1322434b2d254f0158c5840f029b12dbafe | [
"MIT"
] | null | null | null | """
Manipulate protein sequences and alignments
"""
# core
import io
import gzip
import logging
import re
import functools
# pip
import dendropy
# local
from cathpy.core import error as err
from cathpy.core.tests import is_valid_domain_id
from cathpy.core.models import AminoAcid, AminoAcids, Residue, Segment
LOG = logging.getLogger(__name__)
class Sequence(object):
"""Class to represent a protein sequence."""
re_gap_chars = r'[.\-]'
has_warned_about_deprecated_sequence_headers = False
def __init__(self, hdr: str, seq: str, *, meta=None, description=None):
self._hdr = hdr
self._seq = seq
try:
hdr_info = Sequence.split_hdr(hdr)
except:
raise err.GeneralError('caught error while parsing sequence header: '+hdr)
self._id = hdr_info['id']
self.accession = hdr_info['accession']
self.description = description
self.id_type = hdr_info['id_type']
self.id_ver = hdr_info['id_ver']
self.segs = hdr_info['segs']
self.meta = hdr_info['meta']
if meta:
for key, val in meta.items():
self.meta[key] = val
@property
def uid(self):
"""Returns the unique id for this Sequence"""
return self._id
def set_uid(self, _id):
"""Sets the unique id of the current Sequence object"""
self._id = _id
@property
def is_cath_domain(self):
"""Returns whether this Sequence is a CATH domain."""
return self.id_type == 'domain'
def get_residues(self):
"""
Returns an array of Residue objects based on this sequence.
Note: if segment information has been specified then this
will be used to calculate the `seq_num` attribute.
Raises:
OutOfBoundsError: problem mapping segment info to sequence
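        Example (illustrative values; gap positions have no ``seq_num``)::
            seq = Sequence('1abcA01/10-13', 'AK-GH')
            [res.seq_num for res in seq.get_residues()]
            # -> [10, 11, None, 12, 13]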
"""
residues = []
segs = self.segs
if not segs:
segs = [Segment(1, len(self.seq_no_gaps))]
current_seg_offset = 0
def next_seg():
nonlocal current_seg_offset
if current_seg_offset < len(segs):
seg = segs[current_seg_offset]
current_seg_offset += 1
return seg
else:
return None
# theoretical length according to segment info vs length according to sequence
seg_length = 0
for seg in segs:
seg_length += seg.stop - seg.start + 1
actual_length = len(self.seq_no_gaps)
if seg_length != actual_length:
# should this be a warning? (with 1-n numbering as fallback?)
raise err.OutOfBoundsError(
('segment information {} suggests that the sequence '
'length should be {}, but the sequence has {} (non-gap) characters: {}').format(
repr(segs), seg_length, actual_length, self.seq))
current_seg = next_seg()
seq_num = current_seg.start
for offset, aa in enumerate(self.seq, 0):
if current_seg and seq_num > current_seg.stop:
current_seg = next_seg()
if not current_seg:
if not Sequence.is_gap(aa):
raise err.OutOfBoundsError(
('unable to map segment ({}) to sequence: '
'the final segment ends at {}, but the sequence has {} residues '
'(offset: {}, aa: {})').format(
repr(current_seg), seq_num-1, len(self.seq_no_gaps), offset, aa
))
else:
seq_num = None
else:
seq_num = current_seg.start
if Sequence.is_gap(aa):
res = Residue(aa)
else:
res = Residue(aa, seq_num)
seq_num += 1
residues.append(res)
return residues
def get_res_at_offset(self, offset):
"""Return the residue character at the given offset (includes gaps)."""
try:
res = self.seq[offset]
except:
raise err.SeqIOError((
"Error: failed to get residue at offset {} from sequence "
"with length {}: '{}'").format(offset, self.length(), self.seq))
return res
def get_res_at_seq_position(self, seq_pos):
"""Return the residue character at the given sequence position (ignores gaps)."""
seq_nogap = re.sub(Sequence.re_gap_chars, '', self.seq)
try:
res = seq_nogap[seq_pos-1]
except:
raise err.SeqIOError((
"Error: failed to get residue at position {} from sequence with {} "
"non-gap sequence positions: '{}'").format(
seq_pos, len(seq_nogap), self.seq))
return res
def get_seq_position_at_offset(self, offset):
"""Returns sequence position (ignoring gaps) of the given residue (may include gaps)."""
seq_to_offset = self.seq[:offset+1]
        if re.match(Sequence.re_gap_chars, seq_to_offset[-1]):
raise err.GapError(
"Cannot get sequence position at offset {} since this corresponds to a gap".format(
offset))
seq_nogap = re.sub(Sequence.re_gap_chars, '', seq_to_offset)
return len(seq_nogap)
def get_offset_at_seq_position(self, seq_pos):
"""Return the offset (with gaps) of the given sequence position (ignores gaps)."""
current_seq_pos = 0
for offset in range(len(self.seq)):
if not re.match(Sequence.re_gap_chars, self.seq[offset]):
current_seq_pos += 1
if current_seq_pos == seq_pos:
return offset
raise err.OutOfBoundsError("failed to find offset at sequence position {}".format(seq_pos))
def length(self):
"""Return the length of the sequence."""
return len(self.seq)
@property
def seq(self):
"""Return the amino acid sequence as a string."""
return self._seq
@property
def seq_no_gaps(self):
"""Return the amino acid sequence as a string (after removing all gaps)."""
seq = re.sub(self.re_gap_chars, '', self._seq)
return seq
def set_sequence(self, seq):
"""Sets the AA residues for this Sequence."""
self._seq = seq
def set_cluster_id(self, id_str):
"""Sets the cluster id for this Sequence."""
self.meta['CLUSTER_ID'] = id_str
@property
def cluster_id(self):
"""Returns the cluster id for this Sequence."""
return self.meta['CLUSTER_ID'] if 'CLUSTER_ID' in self.meta else None
@classmethod
def split_hdr(cls, hdr: str) -> dict:
"""
Splits a sequence header into meta information.
Args:
hdr (str): header string (eg `'domain|4_2_0|1cukA01/3-23_56-123'`)
Returns:
info (dict): header info
::
{
'id': 'domain|4_2_0|1cukA01/3-23_56-123',
'accession': '1cukA01',
'id_type': 'domain',
'id_ver': '4_2_0',
'segs': [Segment(3, 23), Segment(56,123)],
'meta': {}
}
"""
accession = None
id_type = None
id_ver = None
segs = []
meta = {}
if not hdr:
raise err.ParamError('hdr seems to be empty')
# split meta features (after whitespace)
hdr_parts = hdr.split(maxsplit=1)
id_with_segs_str = hdr_parts[0]
meta_str = hdr_parts[1] if len(hdr_parts) > 1 else None
# split id / segments
id_with_segs_parts = id_with_segs_str.split('/', maxsplit=1)
id_str = id_with_segs_parts[0]
segs_str = id_with_segs_parts[1] if len(id_with_segs_parts) > 1 else None
# split id into type, id, version
id_parts = id_str.split('|')
# 1cukA01/23-123
if len(id_parts) == 1:
accession = id_parts[0]
if is_valid_domain_id(accession):
id_type = 'domain'
# domain|1cukA01/23-123
if len(id_parts) == 2:
id_type, accession = id_parts
# cath|4_2_0|5lhzA00/886-963
# cath|current|5lhzA00/886-963
if len(id_parts) == 3:
id_type, id_ver, accession = id_parts
if is_valid_domain_id(id_ver):
if not __class__.has_warned_about_deprecated_sequence_headers:
LOG.warning(
("Warning: found an old sequence header with TYPE|ID|VERSION '%s'. "
"Parsing this as TYPE|VERSION|ID for now, but this is a hack, and "
"may be deprecated in future versions (fix structural cluster reps?)"
"(ignoring all future occurrences in this runtime)"), id_parts)
__class__.has_warned_about_deprecated_sequence_headers = True
id_type, accession, id_ver = id_parts
# segments
if segs_str:
for seg_str in segs_str.split('_'):
(start, stop) = seg_str.split('-')
seg = Segment(int(start), int(stop))
segs.append(seg)
# features
if meta_str:
meta_parts = meta_str.split()
            for meta_part in meta_parts:
                f = meta_part.split('=', maxsplit=1)
                if len(f) == 2:
                    meta[f[0]] = f[1]
                else:
                    LOG.warning("failed to parse meta feature from string %s", meta_str)
return({'accession': accession, 'id': id_with_segs_str, 'id_type': id_type,
'id_ver': id_ver, 'segs': segs, 'meta': meta})
def to_fasta(self, wrap_width=80):
"""Return a string for this Sequence in FASTA format."""
fasta_str = ""
fasta_str += '>' + self.uid + '\n'
if wrap_width:
for line in Sequence._chunker(self.seq, wrap_width):
fasta_str += line + '\n'
else:
fasta_str += self.seq + '\n'
return fasta_str
def to_pir(self, wrap_width=60, use_accession=False):
"""Return a string for this Sequence in PIR format."""
pir_str = ""
pir_str += '>P1;{}\n'.format(self.uid if not use_accession else self.accession)
desc = self.description or self.accession
pir_str += desc + '\n'
seq = self.seq + '*'
if wrap_width:
for line in Sequence._chunker(seq, wrap_width):
pir_str += line + '\n'
else:
pir_str += seq + '\n'
return pir_str
def copy(self):
"""Provide a deep copy of this sequence."""
s = Sequence(self._hdr, self.seq, meta=self.meta)
return s
def insert_gap_at_offset(self, offset, gap_char="-"):
"""Insert a gap into the current sequence at a given offset."""
new_seq = self.seq[:offset] + gap_char + self.seq[offset:]
self.set_sequence(new_seq)
def set_gap_char_at_offset(self, offset, gap_char):
"""
Set the gap character at the given offset.
If the residue at a given position is a gap, then override
the gap char with the given character.
"""
residues = list(self.seq)
if Sequence.is_gap(residues[offset]) and residues[offset] != gap_char:
residues[offset] = gap_char
self.set_sequence("".join(residues))
def lower_case_at_offset(self, start, stop=None):
"""Lower case the residues in the given sequence window."""
if stop is None:
stop = start + 1
old_seq = self.seq
new_seq = old_seq[:start] + old_seq[start:stop].lower() + old_seq[stop:]
self.set_sequence(new_seq)
def set_all_gap_chars(self, gap_char='-'):
"""Sets all gap characters."""
seqstr = re.sub(self.re_gap_chars, gap_char, self.seq)
self.set_sequence(seqstr)
def set_lower_case_to_gap(self, gap_char='-'):
"""Set all lower-case characters to gap."""
seqstr = re.sub(r'[a-z]', gap_char, self.seq)
self.set_sequence(seqstr)
def slice_seq(self, start, stop=None):
"""Return a slice of this sequence."""
return self.seq[start:stop]
@staticmethod
def _chunker(text_str, width):
return (text_str[pos:pos + width] for pos in range(0, len(text_str), width))
@staticmethod
def is_gap(res_char):
"""Test whether a character is considered a gap."""
return res_char in ['-', '.']
@property
def accession_and_seginfo(self):
"""Returns accession and segment info for this Sequence."""
segs_str = self.seginfo
if segs_str:
return self.accession + '/' + segs_str
else:
return self.accession
@property
def seginfo(self):
"""Returns the segment info for this Sequence."""
segs_str = '_'.join(['-'.join([str(s.start), str(s.stop)]) for s in self.segs])
return segs_str
def apply_segments(self, segs):
"""
Returns a subset of the current sequence, chopped by the segments.
Args:
segs ([]): [Segment] or [[START, STOP], ...]
Returns:
seq (:class:`Sequence`): sequence object
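        Example (illustrative)::
            seq = Sequence('query', 'ABCDEFGHIJ')
            sub = seq.apply_segments([[2, 4], [7, 9]])
            # sub.uid == 'query/2-4_7-9', sub.seq == 'BCDGHI'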
"""
if self.segs:
raise Exception("cannot apply segments as Sequence already has segments defined")
seq = self.seq
acc = self.accession
startstops = [(seg[0], seg[1]) for seg in segs]
seq_range = '_'.join(['{}-{}'.format(ss[0],ss[1]) for ss in startstops])
seq_parts = [seq[ss[0]-1:ss[1]] for ss in startstops]
subseq = Sequence(hdr="{}/{}".format(acc, seq_range), seq="".join(seq_parts))
return subseq
def __str__(self):
"""Represents this Sequence as a string."""
return '{:<30} {}'.format(self.uid, self.seq)
def __len__(self):
return len(self.seq)
class Correspondence(object):
"""
Provides a mapping between ATOM and SEQRES residues.
A correspondence is a type of alignment that provides the equivalences
between the residues in the protein sequence (eg ``SEQRES`` records) and
the residues actually observed in the structure (eg ``ATOM`` records).
Within CATH, this is most commonly initialised from a GCF file:
::
aln = Correspondence.from_gcf('/path/to/<uid>.gcf')
TODO: allow this to be created from PDBe API endpoint.
"""
GCF_GAP_CHAR = '*'
FASTA_GAP_CHAR = '-'
def __init__(self, uid=None, *, hdr=None, residues=None,):
"""Create a new Correspondence object."""
self._uid = uid
self._hdr = hdr
self.residues = residues if residues else []
super().__init__()
@property
def uid(self):
"""Returns the unique id of the current Correspondence object."""
return self._uid
@classmethod
def from_gcf(cls, gcf_io):
"""Create a new Correspondence object from a GCF io / filename / string.
This provides a correspondence between SEQRES and ATOM records for a given
protein structure.
Example format:
::
>gi|void|ref1
A 1 5 A
K 2 6 K
G 3 7 G
H 4 8 H
P 5 9 P
G 6 10 G
P 7 10A P
K 8 10B K
A 9 11 A
P 10 * *
G 11 * *
...
"""
if isinstance(gcf_io, str):
if gcf_io[0] == '>':
gcf_io = io.StringIO(gcf_io)
else:
gcf_io = open(gcf_io)
try:
hdr = gcf_io.readline().strip()
hdr = hdr[1:] # remove '>'
uid = hdr.split('|')[-1]
except AttributeError:
# make a potentially confusing error slightly less so
raise err.SeqIOError(
"encountered an error trying to readline() on GCF io ({})".format(gcf_io))
line_no = 1
residues = []
for line in gcf_io:
line_no += 1
try:
seqres_aa, seqres_num, pdb_label, pdb_aa = line.split()
                if pdb_aa != seqres_aa and pdb_aa != Correspondence.GCF_GAP_CHAR:
LOG.warning("pdb_aa '%s' does not match seqres_aa '%s' (line: %s)",
pdb_aa, seqres_aa, line_no)
except:
raise err.SeqIOError("Error: failed to parse GCF '{}' ({}:{})".format(
line, str(gcf_io), line_no))
            if pdb_label == Correspondence.GCF_GAP_CHAR:
pdb_label = None
pdb_aa = None
res = Residue(seqres_aa, int(seqres_num), pdb_label, pdb_aa=pdb_aa)
residues.extend([res])
gcf_io.close()
corr = Correspondence(uid=uid, hdr=hdr, residues=residues)
return corr
@property
def seqres_length(self) -> int:
"""Returns the number of `SEQRES` residues"""
return len(self.residues)
@property
def atom_length(self) -> int:
"""Returns the number of `ATOM` residues"""
atom_residues = [res for res in self.residues if res.pdb_label is not None]
return len(atom_residues)
def get_res_at_offset(self, offset: int) -> Residue:
"""Returns the :class:`Residue` at the given offset (zero-based)"""
return self.residues[offset]
def get_res_by_seq_num(self, seq_num: int) -> Residue:
"""Return the :class:`Residue` with the given sequence number"""
res = next((res for res in self.residues if res.seq_num == seq_num), None)
return res
def get_res_by_pdb_label(self, pdb_label: str) -> Residue:
"""Returns the :class:`Residue` that matches `pdb_label`"""
res = next((res for res in self.residues if res.pdb_label == pdb_label), None)
return res
def get_res_by_atom_pos(self, pos: int) -> Residue:
"""Returns Residue corresponding to position in the ATOM sequence (ignores gaps)."""
assert isinstance(pos, int)
assert pos >= 1
atom_residues = [res for res in self.residues if res.pdb_label is not None]
res = atom_residues[pos-1]
return res
    def get_res_offset_by_atom_pos(self, pos: int) -> int:
"""
Returns offset of Residue at given position in the ATOM sequence (ignoring gaps).
Raises:
:class:`cathpy.error.OutOfBoundsError`
"""
assert isinstance(pos, int)
assert pos >= 1
atom_pos = 0
for offset, res in enumerate(self.residues):
if res.pdb_label is not None:
atom_pos += 1
# LOG.debug("pos({}) -> res: offset: {}, res: {}, atom_pos: {}".format(
# pos, offset, repr(res), atom_pos))
if atom_pos == pos:
return offset
atom_residues = [res for res in self.residues if res.pdb_label is not None]
raise err.OutOfBoundsError(
"failed to find residue in atom pos {}, last atom residue is {} (position {})".format(
pos, repr(atom_residues[-1]), atom_pos))
@property
def first_residue(self) -> Residue:
"""Returns the first residue in the correspondence."""
return self.get_res_at_offset(0)
@property
def last_residue(self) -> Residue:
"""Returns the last residue in the correspondence."""
return self.get_res_at_offset(-1)
@property
def atom_sequence(self) -> Sequence:
"""Returns a Sequence corresponding to the ATOM records."""
_id = "atom|{}".format(self.uid)
res = [res.pdb_aa if res.pdb_label else Correspondence.FASTA_GAP_CHAR
for res in self.residues]
return Sequence(_id, "".join(res))
@property
def seqres_sequence(self) -> Sequence:
"""Returns a Sequence corresponding to the SEQRES records."""
_id = "seqres|{}".format(self.uid)
res = [res.aa for res in self.residues]
return Sequence(_id, "".join(res))
def apply_seqres_segments(self, segs):
"""Returns a new correspondence from just the residues within the segments."""
current_seg_offset = 0
def next_seg():
nonlocal current_seg_offset
# LOG.debug("apply_seqres_segments.next_seg: current={} segs={}".format(
# current_seg_offset, repr(segs) ))
if current_seg_offset < len(segs):
seg = segs[current_seg_offset]
current_seg_offset += 1
return seg
current_seg = next_seg()
selected_residues = []
for res in self.residues:
# LOG.debug('apply_seqres.res: [{}] {}-{} seq_num={}'.format(
# current_seg_offset, current_seg.start, current_seg.stop,
# res.seq_num))
if res.seq_num >= current_seg.start and res.seq_num <= current_seg.stop:
selected_residues.append(res)
elif res.seq_num < current_seg.start:
pass
elif res.seq_num > current_seg.stop:
current_seg = next_seg()
if not current_seg:
break
else:
raise err.SeqIOError("unexpected error - shouldn't be able to reach this code")
corr = __class__(uid=self.uid, hdr=self._hdr, residues=selected_residues)
return corr
def to_gcf(self) -> str:
"""Renders the current object as a GCF string.
Example format:
::
>gi|void|ref1
A 1 5 A
K 2 6 K
G 3 7 G
H 4 8 H
P 5 9 P
G 6 10 G
P 7 10A P
K 8 10B K
A 9 11 A
P 10 * *
G 11 * *
...
"""
hdr = self._hdr if self._hdr else self.uid
gcf_str = '>' + hdr + '\n'
for res in self.residues:
if res.pdb_label:
pdb_label = '{}{}'.format(res.pdb_residue_num, res.pdb_insert_code if res.pdb_insert_code else ' ')
vals = [res.aa, res.seq_num, pdb_label, res.pdb_aa]
else:
vals = [res.aa, res.seq_num, '* ', '*']
gcf_str += '{} {:>3} {:>4} {}\n'.format(*vals)
return gcf_str
def to_sequences(self) -> [Sequence]:
"""Returns the Correspondence as a list of `Sequence` objects"""
seqs = (self.seqres_sequence, self.atom_sequence)
return seqs
def to_fasta(self, **kwargs) -> str:
"""Returns the Correspondence as a string (FASTA format)."""
seqs = self.to_sequences()
return seqs[0].to_fasta(**kwargs) + seqs[1].to_fasta(**kwargs)
def to_aln(self):
"""Returns the Correspondence as an Align object."""
seqs = self.to_sequences()
return Align(seqs=seqs)
def __str__(self):
return self.to_fasta()
def __repr__(self):
return self.to_fasta()
class AlignMetaSummary(object):
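    """Summary of alignment metadata: sequence and CATH domain counts,
    EC / GO term counts, DOPS score and the organism tree (newick)."""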
def __init__(self, *, seq_count, ec_term_counts=None, go_term_counts=None,
cath_domain_count=0, dops_score=None, organism_newick=None):
self.seq_count = seq_count
self.ec_term_counts = ec_term_counts
self.go_term_counts = go_term_counts
self.cath_domain_count = cath_domain_count
self.dops_score = dops_score
self.organism_newick = organism_newick
class Align(object):
"""
Object representing a protein sequence alignment.
The only required field is `sequences`, otherwise all fields are optional
and are mainly here to satisfy the named fields in `STOCKHOLM` alignment
format.
Args:
seqs ([:class:`Sequence`]): aligned sequences (required)
uid (str): unique identifier for this alignment
accession (str): accession for this alignment
author (str): person responsible for creating this alignment
cath_version (str | :class:`CathVersion`): CATH version
dops_score (float): sequence diversity score (0 low, 100 high)
description (str): description to associate with this alignment
aln_type (str): type of alignment (eg cluster type)
min_bitscore (float): minimum bitscore for sequences in this alignment
tree_nhx (str): store the tree (NHX format)
tree_id (str): identifier of the tree
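    Example (illustrative)::
        aln = Align(seqs=[
            Sequence('seq1', 'AKGH-PGPKA'),
            Sequence('seq2', 'AKGHAPGPKA'),
        ])
        aln.count_sequences    # 2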
"""
REF_GAP_CHAR = '-'
MERGE_GAP_CHAR = '.'
STO_META_TO_ATTR = [
# required
('ID', '_uid'),
('AC', 'accession'),
('DE', 'description'),
('AU', 'author'),
('SE', 'meta.source_seed'),
('SS', 'meta.source_structure'),
('BM', 'meta.build_method'),
('SM', 'meta.search_method'),
('GA', 'meta.gathering_threshold'),
('TC', 'meta.trusted_cutoff'),
('NC', 'meta.noise_cutoff'),
('AC', 'accession'),
('TP', 'aln_type'),
('TC', 'min_bitscore'),
('SQ', None),
# optional
('DC', 'meta.db_comment'),
('DR', {
'CATH': 'cath_version',
'DOPS': 'dops_score',
'INTERPRO': 'interpro',
}),
('RC', 'meta.ref_comment'),
('RN', 'meta.ref_number'),
('RM', 'meta.ref_medline'),
('RT', 'meta.ref_title'),
('RA', 'meta.ref_author'),
('RL', 'meta.ref_location'),
('PI', 'meta.prev_id'),
('KW', 'meta.keywords'),
('CC', 'meta.comment'),
('NE', 'meta.pfam_accession'),
('NL', 'meta.seq_location'),
('WK', 'meta.wikipedia_link'),
('CL', 'meta.pfam_clan'),
('MB', 'meta.pfam_clan_membership'),
# trees
('NH', 'tree_nhx'),
('TN', 'tree_id'),
]
def __init__(self, seqs=None, *, uid=None, accession=None, author=None,
cath_version=None, dops_score=None, description=None,
aln_type=None, min_bitscore=None, tree_nhx=None, tree_id=None):
self.meta = {} # per file meta data
self.seq_meta = {} # consensus sequence-based meta data
self.__seq_ids = set()
self._uid = uid
self.accession = accession
self.author = author
self.description = description
self.cath_version = cath_version
self.dops_score = dops_score
self.accession = accession
self.aln_type = aln_type
self.min_bitscore = min_bitscore
self.tree_nhx = tree_nhx
self.tree_id = tree_id
self.seqs = seqs if seqs else []
self.__aln_positions = 0
self._merge_counter = 0
@property
def uid(self):
"""Returns the id of this Align object."""
return self._uid
def set_uid(self, uid):
"""Sets the id of this Align object."""
self._uid = uid
def _next_merge_id(self):
self._merge_counter += 1
return self._merge_counter
@property
def sequences(self):
"""Provides access to the Sequence objects in the alignment."""
return self.seqs
@property
def aln_positions(self):
"""Returns the number of alignment positions."""
return self.__aln_positions
@aln_positions.setter
def aln_positions(self, value):
self.__aln_positions = value
@property
def count_sequences(self):
"""Returns the number of sequences in the alignment."""
return len(self.seqs)
@property
def total_gap_positions(self):
"""Returns the total number of gaps in the alignment."""
total_gaps = 0
for s in self.seqs:
total_gaps += s.seq.count(self.REF_GAP_CHAR)
total_gaps += s.seq.count(self.MERGE_GAP_CHAR)
return total_gaps
@property
def total_positions(self):
"""Returns the total number of positions in the alignment."""
return self.count_sequences * self.aln_positions
def find_first_seq_by_accession(self, acc):
"""Returns the first Sequence with the given accession."""
seqs_with_acc = [seq for seq in self.seqs if seq.accession == acc]
return seqs_with_acc[0]
def find_seq_by_id(self, _id):
"""Returns the Sequence corresponding to the provided id."""
seqs_with_id = [seq for seq in self.seqs if seq.uid == _id]
if len(seqs_with_id) > 1:
raise err.SeqIOError("Found more than one ({}) sequence matching id '{}'".format(
len(seqs_with_id), _id))
if not seqs_with_id: # ie empty list
raise err.NoMatchesError('failed to find sequence with id {} in alignment'.format(_id))
return seqs_with_id[0]
def find_seq_by_accession(self, acc):
"""Returns the Sequence corresponding to the provided id."""
seqs_with_acc = [seq for seq in self.seqs if seq.accession == acc]
if len(seqs_with_acc) > 1:
raise err.TooManyMatchesError(
"Found more than one ({}) sequence matching accession '{}'".format(
len(seqs_with_acc), acc),)
if len(seqs_with_acc) == 0:
raise err.NoMatchesError(
'failed to find sequence with accession {} in alignment'.format(acc))
return seqs_with_acc[0]
def get_seq_at_offset(self, offset):
"""Returns the Sequence at the given offset (zero-based)."""
return self.seqs[offset]
@classmethod
def from_fasta(cls, fasta_io):
"""Initialises an alignment object from a FASTA file / string / io"""
aln = Align()
aln.read_sequences_from_fasta(fasta_io)
return aln
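    # Usage sketch: build an alignment from an in-memory FASTA string
    # (the ids and residues below are illustrative only).
    #
    #   aln = Align.from_fasta(">seq1\n--AKGH--P\n>seq2\nGPAKGHKAP\n")
    #   assert aln.count_sequences == 2
    #   assert aln.aln_positions == 9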
@classmethod
def from_pir(cls, pir_io):
"""Initialises an alignment object from a PIR file / string / io"""
aln = Align()
aln.read_sequences_from_pir(pir_io)
return aln
@staticmethod
def _get_io_from_file_or_string(file_or_string):
filename = str(file_or_string)
if isinstance(file_or_string, str):
filename = '<string>'
if file_or_string[0] in ('>', '#'): # fasta or stockholm
_io = io.StringIO(file_or_string)
elif file_or_string.endswith('.gz'):
_io = gzip.open(file_or_string, 'rt')
else:
_io = open(file_or_string, 'rt')
elif isinstance(file_or_string, io.IOBase):
_io = file_or_string
else:
_io = file_or_string
LOG.warning("unexpected io type: %s", repr(file_or_string))
return _io, filename
@classmethod
def from_stockholm(cls, sto_io, *, nowarnings=False):
"""Initialises an alignment object from a STOCKHOLM file / string / io"""
sto_io, sto_filename = cls._get_io_from_file_or_string(sto_io)
aln = cls()
sto_header = sto_io.readline()
assert sto_header.startswith('# STOCKHOLM 1.0')
aln_meta = {}
aln_seq_meta = {}
seq_meta_by_id = {}
seq_aa_by_id = {}
aln_meta_unrecognised_features = {}
gc_meta_to_attr = {meta: attr for (meta, attr) in cls.STO_META_TO_ATTR}
line_count = 0
for line in sto_io:
line_count += 1
line = line.strip()
if line.startswith('#=GF'):
try:
_, feature, per_file_ann = line.split(None, 2)
except ValueError:
                    if not nowarnings:
                        LOG.warning('ignoring GF record with incorrect columns (%s:%s "%s")',
                                    sto_filename, line_count, line)
                    continue
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
if feature not in gc_meta_to_attr:
raise err.ParseError(
'encountered unexpected GF tag {} in line {} "{}" (known tags: {})'.format(
feature, line_count, line, repr(gc_meta_to_attr)))
attr = gc_meta_to_attr[feature]
if type(attr) is dict:
key, val = re.compile(r'[;:]\s+').split(per_file_ann, maxsplit=1)
per_file_ann = val
if key in attr:
attr = attr[key]
else:
LOG.warning('encountered unexpected GF tag %s->%s in line %s "%s" (known tags: %s)',
feature, key, line_count, line, repr(attr))
if feature not in aln_meta_unrecognised_features:
aln_meta_unrecognised_features[feature] = []
aln_meta_unrecognised_features[feature].extend([per_file_ann])
attr = None
if attr:
if attr.startswith('meta.'):
attr = attr[len('meta.'):]
aln_meta[attr] = per_file_ann
else:
LOG.debug('setting aln attr "%s" to "%s"', attr, per_file_ann)
setattr(aln, attr, per_file_ann)
elif line.startswith('#=GC'):
try:
_, feature, per_col_ann = line.split(None, 2)
aln_seq_meta[feature] = per_col_ann
except ValueError:
if not nowarnings:
LOG.warning('ignoring GC record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
elif line.startswith('#=GS'):
try:
_, seq_id, feature, per_seq_ann = line.split(None, 3)
if feature == 'DR':
dr_type, per_seq_ann = per_seq_ann.split(None, 1)
dr_type = dr_type.rstrip(';')
feature = feature + '_' + dr_type
if seq_id not in seq_meta_by_id:
seq_meta_by_id[seq_id] = {}
seq_meta_by_id[seq_id][feature] = per_seq_ann
except ValueError:
if not nowarnings:
LOG.warning('ignoring GS record with incorrect columns (%s:%s "%s")',
sto_filename, line_count, line)
except:
raise err.ParseError('failed to parse line {} "{}"'.format(
line_count, line))
elif line.startswith('#=GR'):
_, seq_id, feature, per_res_ann = line.split(None, 3)
seq_meta_by_id[seq_id][feature] = per_res_ann
elif line.startswith('//'):
pass
else:
seq_id, seq_aa = line.split()
if seq_id not in seq_aa_by_id:
seq_aa_by_id[seq_id] = ''
seq_aa_by_id[seq_id] += seq_aa
for seq_id, seq_aa in seq_aa_by_id.items():
seq_meta = seq_meta_by_id[seq_id] if seq_id in seq_meta_by_id else {}
seq = Sequence(seq_id, seq_aa, meta=seq_meta)
aln.add_sequence(seq)
for key, val in aln_meta.items():
aln.meta[key] = val
for key, val in aln_seq_meta.items():
aln.seq_meta[key] = val
sto_io.close()
return aln
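    # Usage sketch: parse a minimal STOCKHOLM string (illustrative ids); only
    # the header, the sequence lines and the '//' terminator are required.
    #
    #   sto = ("# STOCKHOLM 1.0\n"
    #          "seq1 --AKGH--P\n"
    #          "seq2 GPAKGHKAP\n"
    #          "//\n")
    #   aln = Align.from_stockholm(sto)
    #   assert aln.count_sequences == 2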
def read_sequences_from_fasta(self, fasta_io):
"""Parses aligned sequences from FASTA (str, file, io) and adds them to the current
Align object. Returns the number of sequences that are added."""
fasta_io, fasta_filename = __class__._get_io_from_file_or_string(fasta_io)
re_seqstr = re.compile(r'^[a-zA-Z.\-]+$')
seq_added = 0
current_hdr = None
current_seq = ''
line_count = 0
for line in fasta_io:
line_count += 1
line = line.rstrip()
if line == "":
break
if line[0] == '>':
if current_seq:
seq = Sequence(current_hdr, current_seq)
self.add_sequence(seq)
current_seq = ''
seq_added += 1
current_hdr = line[1:]
else:
if not re_seqstr.match(line):
raise err.SeqIOError(
('encountered an error parsing FASTA: '
'string "{}" does not look like a sequence ({}:{})').format(
line, fasta_filename, line_count))
if not current_hdr:
raise err.SeqIOError(
('encountered an error parsing FASTA: '
'found sequence "{}" without a header ({}:{})').format(
line, fasta_filename, line_count))
current_seq += str(line)
fasta_io.close()
if current_seq:
seq = Sequence(current_hdr, current_seq)
self.add_sequence(seq)
seq_added += 1
return seq_added
def read_sequences_from_pir(self, pir_io):
"""Parse aligned sequences from PIR (str, file, io) and adds them to the current
Align object. Returns the number of sequences that are added."""
pir_io, pir_filename = __class__._get_io_from_file_or_string(pir_io)
re_seqstr = re.compile(r'^[a-zA-Z.\-]+\*?$')
seq_added = 0
current_hdr = None
current_desc = None
current_seq = ''
line_count = 0
for line in pir_io:
line_count += 1
line = line.rstrip()
if line == "":
continue
if line[0] == '>':
# following line is description as free text
if current_seq:
current_seq = current_seq.replace("*", "")
seq = Sequence(current_hdr, current_seq, description=current_desc)
self.add_sequence(seq)
current_seq = ''
seq_added += 1
seq_type, current_hdr = line[1:].split(';')
line = next(pir_io).rstrip()
current_desc = line
else:
if not re_seqstr.match(line):
raise err.SeqIOError(
('encountered an error parsing PIR: '
'string "{}" does not look like a sequence ({}:{})').format(
line, pir_filename, line_count))
if not current_hdr:
raise err.SeqIOError(
('encountered an error parsing PIR: '
'found sequence "{}" without a header ({}:{})').format(
line, pir_filename, line_count))
current_seq += str(line)
pir_io.close()
if current_seq:
current_seq = current_seq.replace("*", "")
seq = Sequence(current_hdr, current_seq, description=current_desc)
self.add_sequence(seq)
seq_added += 1
return seq_added
def _reindex_seq_ids(self):
self.__seq_ids = set()
for seq in self.seqs:
self.__seq_ids.add(seq.uid)
def add_sequence(self, seq:Sequence, *, offset:int=None):
"""
Add a sequence to this alignment.
Args:
offset (int): the index in the list where the sequence should be added (default: append)
"""
        if offset is None:
offset = len(self.sequences)
if seq.uid in self.__seq_ids:
raise err.SeqIOError((
"Error: cannot add a sequence with id {}, "
"since this alignment already has a sequence with that id. [{}]").format(
seq.uid, ",".join(self.__seq_ids)))
if self.aln_positions:
if self.aln_positions != seq.length():
raise err.SeqIOError((
"Error: cannot add a sequence (id:{}) "
"with {} positions to an alignment with {} positions.").format(
seq.uid, seq.length(), self.aln_positions))
else:
self.__aln_positions = seq.length()
self.seqs.insert(offset, seq)
self.__seq_ids.add(seq.uid)
return seq
def subset(self, ids, *, collapse_gaps=True):
"""
Returns a subset of the alignment containing just the sequence ids
"""
seqs = [self.find_seq_by_id(i) for i in ids]
new_align = Align(seqs=seqs)
if collapse_gaps:
new_align = new_align.remove_alignment_gaps()
return new_align
def remove_sequence_by_id(self, seq_id: str):
"""Removes a sequence from the alignment."""
for idx, seq in enumerate(self.seqs):
if seq.uid == seq_id:
LOG.info("Removing sequence with '{}' from alignment".format(seq_id))
del self.seqs[idx]
return seq
raise err.NoMatchesError('failed to find sequence with id {}'.format(seq_id))
def remove_alignment_gaps(self):
"""Return a new alignment after removing alignment positions
that contain a gap for all sequences."""
seqs = self.seqs
seq_length = seqs[0].length()
new_seq_strings = ["" for s in range(len(seqs))]
for aln_offset in range(seq_length):
total_gaps = 0
for seq in seqs:
if seq.seq[aln_offset] == '-' or seq.seq[aln_offset] == '.':
total_gaps += 1
if total_gaps < len(seqs):
for seq_pos in range(len(seqs)):
res = seqs[seq_pos].seq[aln_offset]
# print( "seq[{}:{}] pos:{} res:{}".format(
# aln_offset, seqs[seq_pos].uid, seq_pos, res) )
new_seq_strings[seq_pos] += res
else:
LOG.debug("Removing complete gap from alignment offset: %s", aln_offset)
new_aln = Align()
for seq_pos in range(len(new_seq_strings)):
hdr = seqs[seq_pos]._hdr
seq_str = new_seq_strings[seq_pos]
seq = Sequence(hdr, seq_str)
new_aln.add_sequence(seq)
return new_aln
def insert_gap_at_offset(self, offset, gap_char='-'):
"""Insert a gap char at the given offset (zero-based)."""
self.__aln_positions += 1
for s in self.seqs:
s.insert_gap_at_offset(offset, gap_char)
def set_gap_char_at_offset(self, offset, gap_char):
"""Override the gap char for all sequences at a given offset."""
for s in self.seqs:
s.set_gap_char_at_offset(offset, gap_char)
def lower_case_at_offset(self, start, stop=None):
"""Lower case all the residues in the given alignment window."""
for s in self.seqs:
s.lower_case_at_offset(start, stop)
def slice_seqs(self, start, stop=None):
"""Return an array of Sequence objects from start to end."""
return [Sequence(s._hdr, s.slice_seq(start, stop)) for s in self.seqs]
def merge_alignment(self, merge_aln, ref_seq_acc: str,
ref_correspondence: Correspondence = None,
*, cluster_label=None, merge_ref_id=False, self_ref_id=False):
"""
Merges aligned sequences into the current object via a reference sequence.
Sequences in ``merge_aln`` are brought into the current alignment using
the equivalences identified in reference sequence ``ref_seq_acc`` (which
must exist in both the ``self`` and ``merge_aln``).
This function was originally written to merge FunFam alignments
according to structural equivalences identified by CORA (a multiple
structural alignment tool). Moving between structure and sequence
provides the added complication that
sequences in the structural alignment (CORA) are based on ATOM records,
whereas sequences in the merge alignment (FunFams) are based on SEQRES
records. The ``ref_correspondence`` argument allows this mapping to be
taken into account.
Args:
merge_aln (Align): An Align containing the reference
sequence and any additional sequences to merge.
ref_seq_acc (str): The accession that will be used to find the
reference sequence in the current alignment and merge_aln
ref_correspondence (Correspondence): An optional Correspondence
object that provides a mapping between the reference
sequence found in ``self`` (ATOM records) and reference
sequence as it appears in ``merge_aln`` (SEQRES records).
cluster_label (str): Provide a label to differentiate the sequences
being merged (eg for groupsim calculations). A default label
is provided if this is ``None``.
self_ref_id (str): Specify the id to use when adding the ref sequence
from the current alignment.
merge_ref_id (str): Specify the id to use when adding the ref sequence
                from the merge alignment. By default this sequence is only included
in the final alignment (as ``<id>_merge``) if a custom
correspondence is provided.
Returns:
[Sequence]: Array of Sequences added to the current alignment.
Raises:
MergeCorrespondenceError: problem mapping reference
sequence between alignment and correspondence
"""
merge_aln = merge_aln.copy()
if not cluster_label:
cluster_label = self._next_merge_id()
for seq in merge_aln.seqs:
seq.set_cluster_id(cluster_label)
ref_seq_in_ref = self.find_seq_by_accession(ref_seq_acc)
ref_seq_in_ref.set_cluster_id(cluster_label)
ref_seq_in_merge = merge_aln.find_seq_by_accession(ref_seq_acc)
if self_ref_id:
ref_seq_in_ref.set_uid(self_ref_id)
# if the merge_ref_id has been specified, or there is not a 1:1 correspondence
# between reference sequence in the alignments, then the merged ref sequence
# will be included in the final alignment. Otherwise it will be removed.
if merge_ref_id:
ref_seq_in_merge.set_uid(merge_ref_id)
else:
ref_seq_in_merge.accession += '_merge'
ref_id = ref_seq_in_merge.accession_and_seginfo
ref_seq_in_merge.set_uid(ref_id)
del ref_id
        if ref_seq_in_ref.uid == ref_seq_in_merge.uid:
raise err.DuplicateSequenceError((
'sequence in ref alignment [{}] cannot have the same id as '
'sequence in merge alignment [{}] (consider specifying self_ref_id'
'or merge_ref_id)').format(ref_seq_in_ref.uid, ref_seq_in_merge.uid))
self._reindex_seq_ids()
if ref_correspondence or merge_ref_id:
merge_id_to_remove = None
else:
merge_id_to_remove = ref_seq_in_merge.uid
if ref_correspondence is None:
# fake a 1:1 correspondence for internal use
# ignore any residue that does not have a seq_num (ie gap)
residues = [res for res in ref_seq_in_ref.get_residues() if res.seq_num]
for r in residues:
r.set_pdb_label(str(r.seq_num))
# LOG.debug("fake correspondence: residue={}".format(repr(r)))
ref_correspondence = Correspondence(ref_seq_acc, residues=residues)
# check: ref sequence (in self) must match the ATOM sequence in Correspondence
ref_no_gaps = ref_seq_in_ref.seq_no_gaps
corr_no_gaps = ref_correspondence.atom_sequence.seq_no_gaps
if ref_no_gaps != corr_no_gaps:
raise err.MergeCorrespondenceError(
seq_id=ref_seq_acc, aln_type='current', seq_type='ATOM',
ref_no_gaps=ref_no_gaps, corr_no_gaps=corr_no_gaps)
# check: ref sequence (in merge) must match the SEQRES sequence in Correspondence
ref_no_gaps = ref_seq_in_merge.seq_no_gaps
corr_no_gaps = ref_correspondence.seqres_sequence.seq_no_gaps
if ref_no_gaps != corr_no_gaps:
raise err.MergeCorrespondenceError(
seq_id=ref_seq_acc, aln_type='merge', seq_type='SEQRES',
ref_no_gaps=ref_no_gaps, corr_no_gaps=corr_no_gaps)
# clean up
del ref_no_gaps
del corr_no_gaps
ref_aln_pos = 0
ref_corr_pos = 0
merge_aln_pos = 0
correspondence_length = ref_correspondence.seqres_length
LOG.debug("ref_alignment.positions: {}".format(self.aln_positions))
LOG.debug("merge_alignment.positions: {}".format(merge_aln.aln_positions))
LOG.debug("ref_seq_in_ref: {}".format(str(ref_seq_in_ref)))
LOG.debug("ref_seq_in_merge: {}".format(str(ref_seq_in_merge)))
while True:
if merge_aln_pos >= merge_aln.aln_positions \
and ref_aln_pos >= self.aln_positions \
and ref_corr_pos >= correspondence_length:
break
LOG.debug("REF %s/%s; CORRESPONDENCE %s/%s; MERGE %s/%s",
ref_aln_pos, self.aln_positions, ref_corr_pos,
correspondence_length, merge_aln_pos, merge_aln.aln_positions)
# sort the gaps in the reference alignment
if ref_aln_pos < self.aln_positions:
for seq in self.slice_seqs(0, ref_aln_pos):
LOG.debug( "{:<10} {}".format("REF", str(seq)) )
ref_res_in_ref = ref_seq_in_ref.get_res_at_offset(ref_aln_pos)
LOG.debug("REF_POSITION {:>3} of {:>3} => '{}'".format(
ref_aln_pos, self.aln_positions, ref_res_in_ref))
# insert all the gaps in the reference alignment into the merge sequences
# keep doing this until we don't have any more gaps
if Sequence.is_gap(ref_res_in_ref):
LOG.debug(("GAP '{}' in ref sequence in REF alignment [{}], "
"inserting gap '{}' at position [{}] in all merge sequences").format(
ref_res_in_ref, ref_aln_pos, ref_res_in_ref, merge_aln_pos))
merge_aln.insert_gap_at_offset(merge_aln_pos, gap_char=ref_res_in_ref)
# this is a gap: do NOT increment ref_corr_pos
ref_aln_pos += 1
merge_aln_pos += 1
continue
# sort the gaps in the merge alignment
if merge_aln_pos < merge_aln.aln_positions:
# for seq in merge_aln.slice_seqs(0, merge_aln_pos):
# LOG.debug( "{:<10} {}".format("MERGE", str(seq)) )
ref_res_in_merge = ref_seq_in_merge.get_res_at_offset(merge_aln_pos)
LOG.debug("MERGE_POSITION {:>3} of {:>3} => '{}'".format(
ref_aln_pos, self.aln_positions, ref_res_in_ref))
# insert all the gaps in the merge alignment into the ref sequences
# keep doing this until we don't have any more gaps
if Sequence.is_gap(ref_res_in_merge):
LOG.debug(("GAP '{}' in ref sequence in MERGE alignment [{}], "
"inserting gap '{}' at position [{}] in all ref sequences").format(
ref_res_in_merge, merge_aln_pos, Align.MERGE_GAP_CHAR, merge_aln_pos))
self.insert_gap_at_offset(ref_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
merge_aln.lower_case_at_offset(merge_aln_pos)
merge_aln.set_gap_char_at_offset(merge_aln_pos, '.')
#ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
continue
# if there are gaps in the correspondence then we add gaps to the ref sequence here
if ref_corr_pos < correspondence_length:
for seq in ref_correspondence.to_sequences():
seq = seq.slice_seq(0, ref_corr_pos)
LOG.debug( "{:<10} {}".format("CORR", str(seq)) )
ref_res_in_corr = ref_correspondence.get_res_at_offset(ref_corr_pos)
if ref_res_in_corr.pdb_label is None:
LOG.debug(("GAP '{}' in ATOM records of correspondence [{}], "
"inserting gap '{}' at position [{}] in ref sequences").format(
'*', ref_corr_pos, Align.MERGE_GAP_CHAR, ref_aln_pos))
#merge_aln.insert_gap_at_offset(merge_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
self.insert_gap_at_offset(ref_aln_pos, gap_char=Align.MERGE_GAP_CHAR)
merge_aln.lower_case_at_offset(merge_aln_pos)
merge_aln.set_gap_char_at_offset(merge_aln_pos, '.')
# IMPORTANT: do not increment merge_aln_pos
ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
continue
ref_corr_pos += 1
ref_aln_pos += 1
merge_aln_pos += 1
LOG.info("FINISHED MERGE")
# for seq in ref_correspondence.to_sequences():
# seq = seq.slice_seq(0, ref_corr_pos)
# LOG.debug( "{:<10} {}".format("CORR", str(seq)) )
# for seq in self.seqs:
# LOG.debug( "{:<10} {}".format("REF", str(seq)) )
# for seq in merge_aln.seqs:
# LOG.debug( "{:<10} {}".format("MERGE", str(seq)) )
# add the merged sequences into this alignment
for seq in merge_aln.seqs:
self.add_sequence(seq)
# for seq in self.seqs:
# LOG.debug( "{:<10} {}".format("MERGED", str(seq)) )
# test the final, merged alignment
# 1. get sequences that correspond to the input aln
# 2. remove alignment positions where there's a gap in the reference sequence
LOG.debug("Checking merge results for %s (%s) ...",
ref_seq_acc, repr(ref_seq_in_merge._hdr))
for original_seq in merge_aln.seqs:
# searching by accession is necessary for CATH domains (since the headers
# in the structure-based alignment do not have segment information),
# however uniprot accessions can appear multiple times so we need to use
# the full id
if original_seq.is_cath_domain:
seq = self.find_seq_by_accession(original_seq.accession)
else:
seq = self.find_seq_by_id(original_seq.uid)
# LOG.debug('Working on sequence: {}'.format(str(original_seq)))
# this provides the residues in the merge alignment with seqres numbering
ref_merge_residues = ref_seq_in_merge.get_residues()
# the lookup lets us go from the seq numbering to the sequence offset
ref_merge_seqnum_to_seqpos = {}
for seq_pos, res in enumerate([res for res in ref_merge_residues if res.seq_num], 1):
ref_merge_seqnum_to_seqpos[res.seq_num] = seq_pos
if not seq:
raise err.SeqIOError("failed to find sequence with id '{}' in merge aln".format(seq.uid))
for aln_offset in range(self.aln_positions):
ref_res = ref_seq_in_ref.get_res_at_offset(aln_offset)
merged_res_at_aln_offset = seq.get_res_at_offset(aln_offset)
if ref_res == self.MERGE_GAP_CHAR:
# everything else should be a '.' or a lowercase residue
assert merged_res_at_aln_offset == '.' or re.match(r'[a-z]', merged_res_at_aln_offset)
elif ref_res == self.REF_GAP_CHAR:
# everything else should be a '-' or an uppercase residue
assert merged_res_at_aln_offset == '-' or re.match(r'[A-Z]', merged_res_at_aln_offset)
else:
# find the sequence offset of this aln position in the ref sequence
ref_seq_pos_in_ref = ref_seq_in_ref.get_seq_position_at_offset(aln_offset)
# use the correspondence to find the equivalent reference residue in the merge alignment
ref_corr_res = ref_correspondence.get_res_by_atom_pos(ref_seq_pos_in_ref)
ref_seq_num_in_merge = ref_corr_res.seq_num
if ref_seq_num_in_merge is None:
raise err.GeneralError(('weird... found a residue without a seq_num in the correspondence record '
' ref_seq_pos_in_ref: {}, res: {}, corr: {}').format(
ref_seq_pos_in_ref, repr(ref_corr_res), repr(ref_correspondence)))
if ref_seq_num_in_merge not in ref_merge_seqnum_to_seqpos:
raise err.OutOfBoundsError(('failed to find seq_num {} ({}) in seqnum/seqpos '
'lookup: {}\ncorrespondence (length: {})').format(
ref_seq_num_in_merge, repr(ref_corr_res), ref_merge_seqnum_to_seqpos,
ref_correspondence.seqres_length, ))
# find out where this seq_num occurs in the merge sequence (account for segment numbering)
ref_seq_pos_in_merge = ref_merge_seqnum_to_seqpos[ref_seq_num_in_merge]
# find the aln offset for the equivalent position in the original merge alignment
ref_merge_offset = ref_seq_in_merge.get_offset_at_seq_position(ref_seq_pos_in_merge)
# LOG.debug("ref_seq_pos (ref): {}, ref_seq_pos (merge): {}, correspondence_res: {}, ref_merge_offset: {}".format(
# ref_seq_pos_in_ref, ref_seq_pos_in_merge, repr(ref_corr_res), ref_merge_offset
# ))
# find the residue at the equivalent position in the merge alignment
original_res = original_seq.get_res_at_offset(ref_merge_offset)
if merged_res_at_aln_offset != original_res:
raise err.MergeCheckError(("Expected the merged residue '{}' to "
"match the original residue '{}' at alignment "
"offset {} (sequence: '{}')\n\n"
"CORR_ATOM: {}\n"
"CORR_SEQRES: {}\n"
"\n\n"
"REF_SEQ_IN_REF: {}\n"
"REF_SEQ_IN_MERGE: {}\n"
"ORIGINAL_SEQ: {}\n"
" {aln_pointer:>{merge_pos}}\n"
"MERGED_SEQ: {}\n"
" {aln_pointer:>{aln_pos}}\n"
"(aln_offset={}, seq_pos(ref)={}, seq_num(merge)={}, seq_pos(merge)={}, ref_merge_offset={})"
).format(
merged_res_at_aln_offset, original_res, aln_offset, seq.uid,
ref_correspondence.atom_sequence,
ref_correspondence.seqres_sequence,
ref_seq_in_ref.seq,
ref_seq_in_merge.seq,
original_seq.seq,
seq.seq,
aln_offset, ref_seq_pos_in_ref, ref_seq_num_in_merge, ref_seq_pos_in_merge, ref_merge_offset,
aln_pointer='^', aln_pos=(aln_offset+1), merge_pos=(ref_merge_offset+1)
))
LOG.info("Finshed checking merge for {} ({})".format(ref_seq_acc, repr(ref_seq_in_merge._hdr)))
# if we have not been given a correspondence then there's no point
# adding the reference sequence from the reference alignment (since
# there is a 1:1 mapping)
if merge_id_to_remove:
LOG.info("Removing reference sequence '%s' from alignment (because 'merge_ref_id' or 'ref_correspondence' is not set)",
merge_id_to_remove)
self.remove_sequence_by_id(merge_id_to_remove)
seqs_by_cluster_id = {}
for seq in self.seqs:
if seq.cluster_id not in seqs_by_cluster_id:
seqs_by_cluster_id[seq.cluster_id] = []
seqs_by_cluster_id[seq.cluster_id].extend([seq])
for cluster_id in seqs_by_cluster_id:
seq_ids = ', '.join([s.uid for s in seqs_by_cluster_id[cluster_id]])
LOG.debug("Cluster %s: %s", cluster_id, seq_ids)
return merge_aln.seqs
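    # Usage sketch (identifiers are illustrative): `ref_aln` is a
    # structure-based alignment and `funfam_aln` a sequence alignment, both
    # containing the accession '1abcA00'; `corr` is an optional Correspondence
    # mapping the ATOM sequence in `ref_aln` to the SEQRES sequence in
    # `funfam_aln`.
    #
    #   merged_seqs = ref_aln.merge_alignment(funfam_aln, '1abcA00',
    #                                         ref_correspondence=corr,
    #                                         cluster_label='ff-1234')
    #   ref_aln.write_sto('merged.sto')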
def copy(self):
"""Return a deepcopy of this object."""
new_aln = Align()
new_seqs = [s.copy() for s in self.seqs]
new_aln.seqs = new_seqs
new_aln.aln_positions = new_aln.seqs[0].length()
return new_aln
def to_fasta(self, wrap_width=80):
"""Returns the alignment as a string (FASTA format)"""
fasta_str = ''
for seq in self.seqs:
fasta_str += seq.to_fasta(wrap_width=wrap_width)
return fasta_str
def to_pir(self, wrap_width=80):
"""Returns the alignment as a string (PIR format)"""
pir_str = ''
for seq in self.seqs:
pir_str += seq.to_pir(wrap_width=wrap_width)
return pir_str
def write_fasta(self, fasta_file, wrap_width=80):
"""Write the alignment to a file in FASTA format."""
with open(fasta_file, 'w') as f:
for seq in self.seqs:
f.write(seq.to_fasta(wrap_width=wrap_width))
def write_pir(self, pir_file, wrap_width=80, *, use_accession=False):
"""Write the alignment to a file in PIR format."""
with open(pir_file, 'w') as f:
for seq in self.seqs:
f.write(seq.to_pir(wrap_width=wrap_width, use_accession=use_accession))
def add_scorecons(self):
"""Add scorecons annotation to this alignment."""
from cathpy.core.util import ScoreconsRunner
scons = ScoreconsRunner()
LOG.info("Calculating scorecons / DOPS ...")
# output alignment to tmp fasta file
scons_result = scons.run_alignment(self)
self.dops_score = scons_result.dops
self.seq_meta['scorecons'] = scons_result.to_string
def add_groupsim(self):
"""Add groupsim annotation to this alignment."""
from cathpy.core.util import GroupsimRunner
gs = GroupsimRunner()
LOG.info("Calculating GroupSim ...")
# output alignment to tmp fasta file
gs_result = gs.run_alignment(self)
self.seq_meta['groupsim'] = gs_result.to_string
def write_sto(self, sto_file, *, meta=None):
"""Write the alignment to a file in STOCKHOLM format."""
# putting these here to separate the data from the formatting
sto_format = '1.0'
# allow meta keys to be provided in args, otherwise fill with the
# appropriate alignment attributes
aln_meta = {}
if meta:
for key, attr in self.STO_META_TO_ATTR:
aln_meta[key] = meta.get(key, None)
comment_pad = 0
for seq in self.seqs:
comment_pad = max(comment_pad, len(seq.uid) + 1)
seq_pad = comment_pad + 8
gc_pad = seq_pad - 5
# single data point about the file
def _GF(f, key, val):
f.write('#=GF {} {}\n'.format(key, val))
# single data point about each sequence
def _GS(f, seq_id, key, val):
if key.startswith('DR_'):
val = "{}; {}".format(key[3:], val)
key = 'DR'
f.write('#=GS {:<{comment_pad}} {} {}\n'.format(seq_id, key, val, comment_pad=comment_pad))
# positional data about the file
def _GC(f, key, per_pos_str):
f.write('#=GC {:<{gc_pad}} {}\n'.format(key, per_pos_str,
gc_pad=gc_pad))
# positional data about each sequence
def _GR(f, seq_id, key, per_pos_str):
f.write('#=GR {:<{comment_pad}} {} {}\n'.format(seq_id, key, per_pos_str, comment_pad=comment_pad))
def _SEQ(f, seq):
f.write('{:<{seq_pad}} {}\n'.format(seq.uid, seq.seq, seq_pad=seq_pad))
def _START(f):
f.write('# STOCKHOLM {}\n'.format(sto_format))
def _END(f):
f.write('//\n')
with open(sto_file, 'w') as f:
_START(f)
_GF(f, 'ID', aln_meta.get('ID', self.uid))
_GF(f, 'DE', aln_meta.get('DE', self.description))
_GF(f, 'AC', aln_meta.get('AC', self.accession))
_GF(f, 'TP', aln_meta.get('TP', self.aln_type))
if self.cath_version:
_GF(f, 'DR', 'CATH: ' + self.cath_version)
if self.dops_score:
_GF(f, 'DR', 'DOPS: {:.3f}'.format(float(self.dops_score)))
for key, val in sorted(self.meta.items()):
_GF(f, key, val)
for seq in self.seqs:
for key, val in seq.meta.items():
_GS(f, seq.uid, key, val)
if self.min_bitscore:
_GF(f, 'TC', self.min_bitscore)
_GF(f, 'SQ', self.count_sequences)
for seq in self.seqs:
_SEQ(f, seq)
for key, val in sorted(self.seq_meta.items()):
_GC(f, key, val)
_END(f)
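    # The file written above has roughly this shape (values are illustrative
    # and depend on the alignment and its meta data):
    #
    #   # STOCKHOLM 1.0
    #   #=GF ID 1.10.8.10-ff-1234
    #   #=GF DR DOPS: 61.530
    #   #=GF SQ 2
    #   seq1       --AKGH--P
    #   seq2       GPAKGHKAP
    #   #=GC scorecons  001111110
    #   //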
def get_meta_summary(self):
"""
Returns summary of information about meta data
This makes some assumptions about the formatting of certain `GS DR` records in
stockholm files.
"""
uniq_go_counts = {}
uniq_ec_counts = {}
cath_domain_count = 0
nodes_by_id = {}
tree = dendropy.Tree()
nodes_by_id['ROOT'] = tree.seed_node
all_taxon_terms = set()
for seq in self.seqs:
go_terms = []
ec_terms = []
org_terms = []
if seq.is_cath_domain:
cath_domain_count += 1
if 'DR_GO' in seq.meta:
go_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_GO'].split(';')]))
if 'DR_EC' in seq.meta:
ec_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_EC'].split(';')]))
if 'DR_ORG' in seq.meta:
org_terms = list(filter(None,
[s.strip() for s in seq.meta['DR_ORG'].split(';')]))
for go_term in go_terms:
if go_term not in uniq_go_counts:
uniq_go_counts[go_term] = 0
uniq_go_counts[go_term] += 1
for ec_term in ec_terms:
if ec_term not in uniq_ec_counts:
uniq_ec_counts[ec_term] = 0
uniq_ec_counts[ec_term] += 1
for org_term in org_terms:
all_taxon_terms.add(org_term)
for idx in range(len(org_terms)-1, 0, -1):
org_term = org_terms[idx]
parent_org_term = org_terms[idx-1] if idx > 1 else 'ROOT'
node_id = '/'.join(org_terms[:idx])
if node_id not in nodes_by_id:
nodes_by_id[node_id] = dendropy.Node(label=org_term)
node = nodes_by_id[node_id]
parent_node_id = '/'.join(org_terms[:idx-1]) if idx > 1 else 'ROOT'
if parent_node_id not in nodes_by_id:
nodes_by_id[parent_node_id] = dendropy.Node(label=parent_org_term)
parent_node = nodes_by_id[parent_node_id]
parent_node.add_child(node)
if not hasattr(node, 'sequence_count'):
setattr(node, 'sequence_count', 0)
if not hasattr(parent_node, 'sequence_count'):
setattr(parent_node, 'sequence_count', 0)
node.sequence_count += 1
taxon_namespace = dendropy.TaxonNamespace(all_taxon_terms)
tree.taxon_namespace = taxon_namespace
for node_id, node in nodes_by_id.items():
taxon_id = node_id.split('/')[-1]
node.taxon = taxon_namespace.get_taxon(taxon_id)
node.label = "{} ({})".format(node.label, node.sequence_count)
tree.seed_node.label = "ROOT ({})".format(self.count_sequences)
# LOG.info("tree:\n{}".format(tree.as_ascii_plot(show_internal_node_labels=True)))
# LOG.info("newick: {}".format(tree.as_string(schema="newick")))
organism_newick = tree.as_string(schema="newick").strip()
uniq_ec_counts = uniq_ec_counts if uniq_ec_counts else None
uniq_go_counts = uniq_go_counts if uniq_go_counts else None
return AlignMetaSummary(
ec_term_counts=uniq_ec_counts,
go_term_counts=uniq_go_counts,
cath_domain_count=cath_domain_count,
seq_count=self.count_sequences,
dops_score=float(self.dops_score),
organism_newick=organism_newick,
)
def __str__(self):
return "\n".join([str(seq) for seq in self.seqs])
| 38.139674 | 134 | 0.561281 |
f71854cf216fd9c15655470c36650db459061d05 | 28,170 | py | Python | KSFD/ksfdtimeseries.py | leonavery/KSFD | 090e388df13a2674676cbaa53171f2a87291ba9b | ["MIT"] | null | null | null | KSFD/ksfdtimeseries.py | leonavery/KSFD | 090e388df13a2674676cbaa53171f2a87291ba9b | ["MIT"] | null | null | null | KSFD/ksfdtimeseries.py | leonavery/KSFD | 090e388df13a2674676cbaa53171f2a87291ba9b | ["MIT"] | null | null | null | """
MPI-aware read and write PETSc Vec to HDF5
The goal of this module is to save snapshots of a PETSc Vec to HDF5
files, and obviously to read them again later. The obvious way to do
this is parallel HDF5. Unfortunately, distributions of HDF5 and h5py
may be built without support for parallel operation. (In particular,
the conda-forge version doesn't have it.) This is accomplished through
the following kludge:
When a KSFD.TimeSeries is created with name tsname and argument mpiok
True, the runtime environment is checked to find out if parallel HDF5
is enabled (using h5py.get_config().mpi). If so, the data are stored in
an HDF5 file named
'{name}MPI.h5'.format(name=tsname).
Note: there is a serious problem with parallel HDF5: variable length
records can't be written. If you try, you get this exception:
OSError: Can't write data (Parallel IO does not support writing VL
datatypes yet)
Since that makes parallel HDF5 a nonstarter for my purposes, mpiok
defaults to False. You won't get parallel MPI unless you specifically
ask for it, and then dealing with the lack of VL records is your
problem.
If not, each process stores the data it owns in a file named
'{name}s{size}r{rank}.h5'.format(name=tsname, size=comm.size, rank=comm.rank)
where comm is the MPI communicator. If run sequentially the data will
all be stored in a file called '{name}s1r0.h5'. It is intended that
the *MPI.h5 file created using parallel HDF5 and the *s1r0.h5 file
created when running sequentially and parallel HDF5 is not available
will be the same.
The same procedure is used for finding the filename when opening in
read/write mode ('r+' or 'a').
When opening a TimeSeries for read (mode 'r') TimeSeries checks (in
order) for the *s<size>r<rank>.h5 file, then the *MPI.h5 file ,and
finally a *s1r0.h5 file, and opens the first it finds. In this case
the retrieve methods will only return the components of the vector
owned by the local process.
Finally, I will write a simple script to merge all the files of
*s<size>r<rank>.h5 series into a single *MPI.h5 file. In this way an
MPI process group of any size will be able to retrieve data written by
a process group of any size.
"""
import h5py, os, re, gc, time
import traceback as tb
import numpy as np
import petsc4py
from mpi4py import MPI
#
# These imports are placed inside a try/except so that this script can
# be executed standalone to check for syntax errors.
#
try:
from .ksfddebug import log
from .ksfdgrid import Grid
except ImportError:
from ksfddebug import log
from ksfdgrid import Grid
def logSERIES(*args, **kwargs):
log(*args, system='SERIES', **kwargs)
class KSFDTimeSeries:
"""
Base class for TimeSeries
KSFDTimeSeries is intended as an abstract base class for reading and
writing time series from KSFD solutions to HDF5 files. It is not
formally defined as an ABC: you can instantiate it if you really
wish, but it is not designed to make that a useful thing to do.
"""
def __init__(
self,
basename,
size=1,
rank=0,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
"""
Required parameter:
basename: the prefix of the filename.
Optional keyword parameters:
size=1: Number of MPI processes. This typically corresponds to
comm.size for an MPI communicator comm.
rank=0: Number of the MPI process that created this
file. Typically comm.rank.
        mpiok=False: Whether parallel HDF5 should be used to store
            all the data from all MPI processes in a single
file.
mode='r+': The file mode for opening the h5py.File.
retries=0. If nonzero, retry faile dopens this many times.
retry_interval=60: time (in secodns) between successive
retries. Note: the open will block while waiting for a
successful retry.
size, rank, and mpiok are used mostly to figure out what
filename to use. They need not correspond to the actual
        current MPI configuration. For instance, they may correspond
to the config when the time series was created.
"""
self.get_filename(basename, size, rank, mpiok, mode)
self.retries = retries
self.retry_interval = retry_interval
self._size = size
self._rank = rank
self._mode = mode
self._tsf = self.open_with_retry()
_ = self.info # make sure '/info' exists
self.try_to_set('size', self.size)
self.try_to_set('rank', self.rank)
if 'times' in self.tsf:
self.ts = np.array(self.tsf['times'][()])
try:
self.ks = np.array(self.tsf['ks'][()])
except KeyError:
self.ks = np.arange(len(self.ts))
self.order = np.array(self.tsf['order'][()])
else:
self.ts = np.array([], dtype=float)
self.ks = np.array([], dtype=int)
self.order = np.array([], dtype=int)
self.lastk = self.ks.size - 1
self.sorted = False
self.tsf.flush()
def parse_filename(filename):
"""
filename is a name like 'bases2r1.h5'. parse_filename returns
(basename, size, rank, mpi) (('base', 2, 1, False) for the
        example). For a filename like 'tests/test1MPI.h5', returns
        ('tests/test1', 1, 0, True).
"""
mpipat = '(.*)MPI\.h5'
nompi_pat = '(.*)s(\d+)r(\d+)\.h5'
res = re.fullmatch(mpipat, filename)
if res:
return (res[1], 1, 0, True)
res = re.fullmatch(nompi_pat, filename)
if res:
            return (res[1], int(res[2]), int(res[3]), False)
raise ValueError(
"Couldn't parse filename {fname}".format(fname=filename)
)
def set_grid(self, grid):
self._grid = grid
self._dim = grid.dim
self._dof = grid.dof
if self.rank_owns_file:
self._ranges = grid.ranges
# if (
# 'ranges' in self.tsf and
# not np.all(self.tsf['ranges'][()] == self.ranges)
# ):
# raise ValueError(
# "data ranges {filerange} in {file} doesn't " +
# "match grid range {gridrange}".format(
# filerange=str(self.tsf['ranges'][()]),
# file=self.filename,
# gridrange=str(grid.ranges)
# )
# )
self.myslice = (slice(0, None),)*(self.dim + 1)
else:
self._ranges = tuple((0, np) for np in grid.nps)
#
# Slice of the global array belonging to this process:
self.myslice = (slice(0, None),) + tuple(
slice(*r) for r in grid.ranges
)
self.try_to_set('ranges', self.ranges)
def get_filename(self, basename, size=1, rank=0, mpiok=True,
mode='r+'):
"""
Get name of file to be opened by this process
self.filename is set to the name of the HDF5 file to be
opened. This is also returned as the function value. In
addition, the following flags are set:
self.creating: True if creating a new file.
self.rank_owns_file: True if the file will be exclusively
owned by this process.
"""
self.usempi = mpiok and h5py.get_config().mpi
name_nompi = '{name}s{size}r{rank}.h5'.format(
name=basename,
size=size,
rank=rank
)
name_mpi = '{name}MPI.h5'.format(name=basename)
name_seq = '{name}s1r0.h5'.format(name=basename)
self.driver = None
if self.usempi and os.path.isfile(name_mpi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = size == 1
self.filename = name_mpi
elif self.usempi and (mode[0] == 'w' or mode[0] == 'x'):
self.creating = True
self.rank_owns_file = size == 1
self.filename = name_mpi
elif os.path.isfile(name_nompi):
self.creating = mode[0] == 'w' or mode[0] == 'x'
self.rank_owns_file = True
self.filename = name_nompi
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_seq):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_seq
# Allow reading from MPi file even if we're not using MPI:
elif (mode == 'r' or mode == 'a') and os.path.isfile(name_mpi):
self.creating = False
self.rank_owns_file = size == 1
self.filename = name_mpi
else:
self.creating = mode != 'r'
self.rank_owns_file = not self.usempi
self.filename = name_mpi if self.usempi else name_nompi
if self.creating and not self.rank_owns_file and self.usempi:
self.driver = 'mpio'
if self.creating:
os.makedirs(os.path.dirname(self.filename), exist_ok=True)
logSERIES('self.filename', self.filename)
logSERIES('self.creating', self.creating)
logSERIES('self.rank_owns_file', self.rank_owns_file)
logSERIES('self.driver', self.driver)
logSERIES('self.usempi', self.usempi)
return self.filename
def open(self, filename, usempi, mode):
if mode in ['w', 'w-', 'x', 'a']:
dirname = os.path.dirname(os.path.abspath(filename))
try:
os.makedirs(dirname, exist_ok=True)
except FileExistsError:
pass
def grid_save(self):
grid = self.grid
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
self.try_to_set('/grid/' + a, getattr(grid, a))
def grid_read(self):
"""Reads grid params from open file, returns dict"""
ggroup = self.tsf['grid']
gd = {}
attrs = ['dim', 'dof', 'nps', 'bounds', 'spacing', 'order',
'stencil_width', 'stencil_type', 'boundary_type',
'globalSshape', 'globalVshape', 'globalCshape', 'Slshape',
'Vlshape', 'ranges', 'Clshape', 'Cashape',
'coordsNoGhosts', 'coordsWithGhosts',
]
for a in attrs:
try:
val = ggroup[a][()]
if a.endswith('shape'):
gd[a] = tuple(val)
elif np.isscalar(val):
gd[a] = val.item()
else:
gd[a] = val
except KeyError:
gd[a] = None
gd['width'] = gd['bounds'][0]
gd['height'] = gd['bounds'][1] if gd['dim'] > 1 else 1.0
gd['depth'] = gd['bounds'][2] if gd['dim'] > 2 else 1.0
gd['nx'] = gd['nps'][0]
gd['ny'] = gd['nps'][1] if gd['dim'] > 1 else 8
gd['nz'] = gd['nps'][2] if gd['dim'] > 2 else 8
return gd
def grid_load(self, gd=None):
"""Reads grid params from open file and creates new Grid."""
if gd is None:
gd = self.grid_read()
grid = Grid(
dim=gd['dim'],
width=gd['width'],
height=gd['height'],
depth=gd['depth'],
nx=gd['nx'],
ny=gd['ny'],
nz=gd['nz'],
dof=gd['dof'],
order=gd['order'],
stencil_width=gd['stencil_width'],
stencil_type=gd['stencil_type'],
boundary_type=gd['boundary_type']
)
self.set_grid(grid)
#
# info is a place for caller to store stuff
@property
def info(self):
"""Place for caller to store extra stuff"""
if not hasattr(self, '_info') or not self._info:
self._info = self.tsf.require_group('/info')
return self._info
@property
def tsFile(self):
"""The open h5File object"""
return self._tsf
@property
def tsf(self):
return self._tsf
@property
def size(self):
return self._size
@property
def rank(self):
return self._rank
@property
def mode(self):
return self._mode
@property
def ranges(self):
return self._ranges
@property
def comm(self):
return self._comm
@property
def grid(self):
return self._grid
@property
def dim(self):
return self._dim
@property
def dof(self):
return self._dof
def try_to_set(self, key, val):
"""Try to set self.tsf[key] to val, but ignore exceptions"""
if (self.mode == 'r'): return
try:
del self.tsf[key]
except KeyError:
pass
try:
self.tsf[key] = val
except ValueError:
pass
def _sort(self):
if getattr(self, 'sorted', False): return
ts = getattr(self, 'ts', np.array([]))
self.try_to_set('times', ts)
self.order = ts.argsort()
self.try_to_set('order', self.order)
self.sts = ts
self.sts.sort()
ks = getattr(self, 'ks', [])
lastk = getattr(self, 'lastk', -1)
self.try_to_set('ks', ks)
self.try_to_set('lastk', lastk)
self.sorted = True
def flush(self):
self._sort()
self.tsf.flush()
def temp_close(self):
"""
temp_close closes the HDF5 file in which the TimeSeries is
stored without destroying associated information. The file
can be reopened with little loss of time. temp_close and
reopen are intended for use during long solutions. If there is
a crash during solution, a temp-closed TimeSeries will be left
in a valid state for later use.
"""
self._sort()
self.tsf.close()
def open_with_retry(
self,
fname=None,
mode=None,
driver=None,
comm=None
):
if fname is None:
fname = self.filename
if mode is None:
mode = self.mode
if driver is None:
driver = self.driver
if comm is None:
comm = self.comm
if isinstance(comm, petsc4py.PETSc.Comm):
comm = comm.tompi4py()
logSERIES('fname, mode, driver, comm', fname, mode, driver, comm)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
except OSError:
retries_left = self.retries
if retries_left <= 0:
logSERIES('open failed: re-raising exception')
raise
while retries_left > 0:
logSERIES('reopen failed with OSError: {n} retries left'.format(
n=retries_left
))
logSERIES('tb.format_exc()', tb.format_exc())
time.sleep(self.retry_interval)
try:
if driver == 'mpio':
logSERIES('trying 4-argument open')
comm.Barrier()
logSERIES('comm.rank, comm.size', comm.rank, comm.size)
tsf = h5py.File(fname, mode=mode,
driver=driver, comm=comm)
else:
logSERIES('trying 3-argument open')
tsf = h5py.File(fname, mode=mode,
driver=driver)
failed = False
except OSError:
failed = True
if retries_left <= 1:
raise
if not failed:
break
retries_left -= 1
return tsf
def reopen(self):
"""
Reopen a temp_closed TimeSeries
"""
mode = self.mode if self.mode == 'r' else 'r+'
self._tsf = self.open_with_retry(mode=mode)
def close(self):
if not hasattr(self, '_tsf') or not self._tsf:
self.reopen()
self._sort()
self.tsf.close()
del self._tsf
gc.collect()
# def __del__(self):
# self.close()
def store(self, data, t, k=None):
if isinstance(data, petsc4py.PETSc.Vec):
vals = data.array.reshape(self.grid.Vlshape, order='F')
else:
vals = data.reshape(self.grid.Vlshape, order='F')
logSERIES('k, t', k, t)
if k is None:
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
try:
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
except OSError:
dset = self.tsf[key] # dset already exists
Cvals = vals.copy(order='C') # h5py requires C order
if self.rank_owns_file:
dset.write_direct(Cvals)
else:
dset[self.myslice] = Cvals
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
self.tsf.flush()
def store_slice(self, ranges, data, t, tol=1e-7):
shape = (self.grid.dof,) + tuple(
r[1] - r[0] for r in ranges
)
slc = (slice(0, None),) + tuple(
slice(*r) for r in ranges
)
vals = data.reshape(shape, order='F')
na, nb, ta, tb = self.find_time(t)
logSERIES('na, nb, ta, tb', na, nb, ta, tb)
if abs(t-ta) <= abs(tb-t):
n, tn = na, ta
else:
n, tn = nb, tb
if (
(not (t == 0.0 and tn == 0.0)) and
((self.sts.size <= n) or
(abs(t-tn)/max(abs(t), abs(tn)) > tol))
):
#
# New time point: append it to the lists
#
k = self.lastk + 1
self.lastk = k
self.ks = np.append(self.ks, k)
self.ts = np.append(self.ts, t)
key = 'data' + str(k)
dset = self.tsf.create_dataset(key, self.grid.Vlshape,
dtype=vals.dtype)
logSERIES('k, t', k, t)
dset.attrs['k'] = k
dset.attrs['t'] = t
self.sorted = False
else:
k = n
key = 'data' + str(k)
dset = self.tsf[key]
dset[slc] = vals
self.tsf.flush()
def times(self):
self._sort()
return self.ts
def steps(self):
self._sort()
return self.ks
def sorted_times(self):
self._sort()
return self.sts
def sorted_steps(self):
self._sort()
return self.order
def retrieve_by_number(self, k):
key = 'data' + str(k)
dset = self.tsf[key]
if self.rank_owns_file:
return np.array(dset)
else:
return np.array(dset)[self.myslice]
def find_time(self, t):
"""
Find the time points closest to t
Returns tuple (a, b, ta, tb)
a and b are the numbers (ints) of the points flanking t. ta
and tb (floats) are the corresponding times. If there is a
        time point exactly matching t, then a == b, ta == tb == t.
"""
self._sort()
if self.sts.size == 0:
return (0, 0, t - 1.0, t - 1.0)
if (t <= self.sts[0]):
a = 0
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
elif (t >= self.sts[-1]):
a = len(self.sts) - 1
return (self.ks[a], self.ks[a], self.sts[a], self.sts[a])
else:
b = self.sts.searchsorted(t)
nb = self.order[b]
tb = self.sts[b]
if (b >= len(self.order) - 1):
return(b, b, self.sts[b], self.sts[b])
elif tb == t:
return(b, b, tb, tb)
a = b - 1
na = self.order[a]
ta = self.sts[a]
return (a, b, ta, tb)
def retrieve_by_time(self, t):
"""
Retrieve a time point.
Arguments:
t: the time to be retrieved.
"""
na, nb, ta, tb = self.find_time(t)
adata = self.retrieve_by_number(na)
if na == nb:
return adata
bdata = self.retrieve_by_number(nb)
data = ((t-ta)*bdata + (tb-t)*adata)/(tb-ta)
return(data)
class TimeSeries(KSFDTimeSeries):
def __init__(
self,
basename,
grid=None,
comm=None,
mpiok=False,
mode='r+',
retries=0,
retry_interval=60
):
"""
Open a KSFD.TimeSeries
Required parameters:
basename: the name of the TimeSeries. (This is a prefix of the
names of the HDF5 files in which data are stored.)
Optional parameters:
grid: The KSFD.Grid on which the PETSc Vecs to be saved are
defined. This must be supplied when creating a new
            TimeSeries. When opening an existing series, it will be
read from the file if not supplied.
comm: the MPI communicator. (If not supplied, grid.comm is
used.)
        mpiok=False: whether it is OK to use parallel HDF5.
        mode: the file mode (see h5py.File).
        retries=0: If nonzero, retry failed opens this many times.
        retry_interval=60: time (in seconds) between successive
retries. Note: the open will block while waiting for a
successful retry.
"""
if comm:
self._comm = comm
elif grid:
self._comm = grid.comm
else:
self._comm = MPI.COMM_SELF
self._mode = mode
self._size = self.comm.size
self._rank = self.comm.rank
self.mpiok = mpiok
super().__init__(basename, size=self.size, rank=self.rank,
mpiok=mpiok, mode=mode, retries=retries,
retry_interval=retry_interval)
if (grid):
self.set_grid(grid)
self.grid_save()
else:
self.grid_load()
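# Usage sketch for TimeSeries: `grid` is assumed to be a KSFD.Grid and `vec`
# a PETSc Vec laid out on that grid.
#
#   ts = TimeSeries('out/run1', grid=grid, mode='w')
#   ts.store(vec, t=0.0)
#   ts.store(vec, t=0.1)
#   data = ts.retrieve_by_time(0.05)   # linear interpolation between frames
#   ts.close()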
class Gatherer(KSFDTimeSeries):
"""
Gatherer is a special-purpose iterator to allow a single
sequential process to read the separate files written by a
TimeSeries run under MPI. For instance, to reconstruct the global
vector at the last time (assuming it fits in memory in a single
process):
gather = Gatherer(basename='base', size=4)
grid = gather.grid
lastk = gather.sorted_steps()[-1]
vec = grid.Vdmda.createGlobalVec()
vecarray = vec.array.reshape(grid.globalVshape, order='F')
for series in gather:
vec = grid.Vdmda.createGlobalVec()
rank = series.rank
vecarray[series.slice] = series.retrieve_by_number(lastk)
<do something with vec...>
This gatherer would iterate through files bases4r0.h5,
bases4r1.h5, bases4r2.h5, and bases4r3.h5. Note that with every
iteration it closes the last file and opens the next. Thus, if you
want to iterate over all times, it is more efficient to nest the
loops like this:
for series in gather:
for t in series.times():
<do something for this file at this time)
than the other way. (The other way would be more intuitive, but my
expectation is that this class will be used mostly to gather all
TimeSeries files into a single file, which then can be processed
efficiently as a TimeSeries.)
"""
def __init__(
self,
basename,
size=None,
retries=0,
retry_interval=60
):
"""
Required positional parameter
basename: the prefix of the filenames for the TimeSeries being
read. As a convenience, this can be a special filename
            that matches the regular expression '(.+)s(\d+)@.*' (that
            is a literal '@'). Then the basename is the (.+) and the
            size is the (\d+) following the 's' and preceding the
            '@'. For example, 'bases4@' or 'bases4@.h5' would both
serve for a series with basename 'base' and size 4.
Optional keyword parameter:
size=None: This argument can be omitted only if the basename
has the special @ filename format. Otherwise, it must be
supplied.
Gatherer is read-only (mode 'r').
"""
self._comm = MPI.COMM_SELF
self.retries = retries
self.retry_interval = retry_interval
gatherre = '(.+)s(\d+)@.*'
fname_match = re.fullmatch(gatherre, basename)
if fname_match:
base = fname_match[1]
size = int(fname_match[2])
else:
base = basename
size = size
self.basename = base
if not isinstance(size, int) or size <= 0:
raise ValueError(
'size {size} is not a positive int'
)
#
# This opens the first file. We have to do that so as to read
# and initialize things like grid, times, etc.
#
super().__init__(
basename=base,
size=size,
rank=0,
mpiok=False,
mode='r',
retries=retries,
retry_interval=retry_interval
)
self.set_ranges()
#
        # Since we have to open the rank 0 file before starting
# iteration, the following flag is used to determine whether
# to open a new file when __iter__ is called
#
self.iter_started = False
self.iter_stopped = False
def set_ranges(self):
self.rank_owns_file = True
gd = self.grid_read()
self.grid_load(gd)
self._ranges = gd['ranges']
self._shape = (self.dof,) + tuple(
r[1] - r[0] for r in self.ranges
)
self._slice = (slice(0, None),) + tuple(
slice(*r) for r in self.ranges
)
@property
def slice(self):
return self._slice
@property
def shape(self):
return self._shape
def __iter__(self):
return self
def __next__(self):
if self.iter_stopped:
#
# We previously exhausted the iteration. Restart it
#
self.tsf.close()
self.__init__(self.basename,
self.size,
retries=self.retries,
retry_interval=self.retry_interval
)
elif self.iter_started:
#
# We're not just starting: move on to next file
#
self.tsf.close()
self._rank = self.rank + 1
if self.rank >= self.size:
self.iter_stopped = True
raise StopIteration
super().__init__(
basename=self.basename,
size=self.size,
rank=self.rank,
mpiok=False,
mode='r',
retries=self.retries,
retry_interval=self.retry_interval
)
self.set_ranges()
self.iter_started = True
self.iter_stopped = False
return self
| 33.939759 | 80 | 0.540469 |
f71859cec54c2858e6e96dfaa122fa325313a2ed | 5,325 | py | Python | artellapipe/core/assetfile.py | ArtellaPipe/artellapipe | 3400f6a55f124f639143fe01c559059eaba23b22 | ["MIT"] | 7 | 2019-10-28T05:18:30.000Z | 2020-08-21T05:36:52.000Z | artellapipe/core/assetfile.py | tpoveda/artellapipe | 3400f6a55f124f639143fe01c559059eaba23b22 | ["MIT"] | 4 | 2020-01-22T02:41:54.000Z | 2020-03-17T10:49:12.000Z | artellapipe/core/assetfile.py | tpoveda/artellapipe | 3400f6a55f124f639143fe01c559059eaba23b22 | ["MIT"] | null | null | null | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains implementations for asset files
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
import os
import logging
import tpDcc as tp
from tpDcc.libs.python import osplatform, path as path_utils
import artellapipe
from artellapipe.core import defines, file
LOGGER = logging.getLogger('artellapipe')
class ArtellaAssetFile(file.ArtellaFile, object):
def __init__(self, file_asset=None, file_path=None, file_name=None):
self._asset = file_asset
file_name = file_name or self._asset.get_name() if self._asset else None
super(ArtellaAssetFile, self).__init__(file_name=file_name, file_path=file_path)
@property
def asset(self):
"""
Returns asset linked to this file type
:return: ArtellaAssset
"""
return self._asset
def has_valid_object(self):
"""
Implements base ArtellaFile has_valid_object function
Returns whether valid object is attached to this file
:return: bool
"""
return bool(self._asset)
def get_template_dict(self, **kwargs):
"""
Returns dictionary with the template data for this file
:param extension: str
:return: dict
"""
template_dict = {
'project_id': self._project.id,
'project_id_number': self._project.id_number,
'asset_name': self._asset.get_name(),
'asset_type': self._asset.get_category(),
'file_extension': kwargs.get('extension', self.FILE_EXTENSIONS[0])
}
return template_dict
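    # Example of the dict produced (values are illustrative and depend on the
    # project and asset this file belongs to):
    #
    #   {
    #       'project_id': 'prj', 'project_id_number': '01',
    #       'asset_name': 'characterA', 'asset_type': 'characters',
    #       'file_extension': '.ma'
    #   }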
def get_project(self):
"""
Implements base ArtellaFile get_project function
Returns project where this asset file belongs to
:return: ArtellaProject
"""
return self._asset.project
def get_file(
self, status=defines.ArtellaFileStatus.WORKING, extension=None, fix_path=False, version=None, **kwargs):
"""
Implements base ArtellaFile get_file function
Returns file of the attached object
:param file_type: str
:param status: str
:param extension: str
:param fix_path: bool
:param version: str
:return: str
"""
template_dict = self.get_template_dict()
return self._asset.get_file(
file_type=self.FILE_TYPE, status=status, extension=extension, fix_path=fix_path,
version=version, extra_dict=template_dict)
def get_path(self):
"""
Implements base ArtellaFile get_path function
Returns path of the attached object
:return: str
"""
return self._asset.get_path()
def get_name(self):
"""
Returns name of the attached object
:return: str
"""
return self._asset.get_name()
def get_extension(self):
"""
Returns the extension of the aseet file
:return: str
"""
return self.get_project().assets_library_file_types.get()
def get_latest_published_versions(self):
"""
Implements base ArtellaFile get_path function
Returns latest published version of file
:return: str
"""
file_path = self.get_path()
return artellapipe.AssetsMgr().get_latest_published_versions(file_path, file_type=self.FILE_TYPE)
def get_file_paths(self, return_first=False, fix_path=True, **kwargs):
if self.FILE_TYPE not in self._asset.FILES:
LOGGER.warning(
'FileType "{}" is not a valid file for Assets of type "{}"'.format(
self.FILE_TYPE, self._asset.FILE_TYPE))
return list()
file_paths = super(
ArtellaAssetFile, self).get_file_paths(return_first=return_first, fix_path=fix_path, **kwargs)
if file_paths:
return file_paths
status = kwargs.get('status', defines.ArtellaFileStatus.PUBLISHED)
if status == defines.ArtellaFileStatus.WORKING:
file_path = self.get_working_path()
else:
file_path = self.get_latest_local_published_path()
if not file_path:
return None if return_first else file_paths
if fix_path:
file_path = artellapipe.FilesMgr().fix_path(file_path)
if return_first:
return file_path
else:
return [file_path]
def _open_file(self, file_path):
if file_path and os.path.isfile(file_path):
if path_utils.clean_path(tp.Dcc.scene_name()) == path_utils.clean_path(file_path):
return True
tp.Dcc.open_file(file_path)
return True
elif file_path and os.path.isdir(file_path):
osplatform.open_file(file_path)
return True
else:
if file_path:
folder_path = os.path.dirname(file_path)
if os.path.isdir(folder_path):
osplatform.open_file(folder_path)
return True
LOGGER.warning('Impossible to open file: "{}"'.format(file_path))
return False
| 29.583333 | 116 | 0.624977 |
f71860515aa0c48a7527206271305a67a617026e | 5,375 | py | Python | entropylab/instruments/tests/test_qcodes_dummy.py | IgorQM/entropy | 8cbd3da356d8196e89eb9d810e643c80d6608481 | [
"BSD-3-Clause"
] | null | null | null | entropylab/instruments/tests/test_qcodes_dummy.py | IgorQM/entropy | 8cbd3da356d8196e89eb9d810e643c80d6608481 | [
"BSD-3-Clause"
] | null | null | null | entropylab/instruments/tests/test_qcodes_dummy.py | IgorQM/entropy | 8cbd3da356d8196e89eb9d810e643c80d6608481 | [
"BSD-3-Clause"
] | null | null | null | from typing import Optional, Dict, Any
import pytest
@pytest.mark.skip()
def test_qcodes_dummy():
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
class QcodesDummy(QcodesAdapter):
def __init__(self):
super().__init__(MockQcodesDriver, "QcodesDummy")
def revert_to_snapshot(self, snapshot: str):
pass
dummy = QcodesDummy()
print(dummy)
dummy.connect()
instance = dummy.get_instance()
instance.set("s", "printed")
instance.free_function()
instance.set("g", "g")
assert instance.get("s") == "printed"
assert instance.get("g") == 1
dummy.teardown()
@pytest.mark.skip()
def test_qcodes_dummy_object():
# Importing in test so general pytest discovery wont enforce qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
dummy.connect()
instance = dummy.get_instance()
instance.set("s", "printed")
instance.free_function()
instance.set("g", "g")
assert instance.get("s") == "printed"
assert instance.get("g") == 1
dummy.teardown()
@pytest.mark.skip()
def test_qcodes_dummy_object_dynamic_spec():
# Importing in test so general pytest discovery wont enforce qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
driver_spec = dummy.get_dynamic_driver_specs()
print(driver_spec)
assert len(driver_spec.parameters) == 3
assert driver_spec.parameters[0].name == "p"
assert driver_spec.parameters[1].name == "s"
assert driver_spec.parameters[2].name == "g"
assert len(driver_spec.functions) == 0
assert len(driver_spec.undeclared_functions) == 3
assert driver_spec.undeclared_functions[0].name == "free_function"
@pytest.mark.skip()
def test_qcodes_dummy_snapshot():
# Importing in test so general pytest discovery wont enforce qcodes installation
from qcodes.instrument.base import InstrumentBase as qcodes_InstrumentBase
from entropylab.instruments.qcodes_adapter import QcodesAdapter
class MockQcodesDriver(qcodes_InstrumentBase):
def __init__(
self, name: str, metadata: Optional[Dict[Any, Any]] = None
) -> None:
super().__init__(name, metadata)
self.add_parameter("p")
setter = lambda val: print(val)
getter = lambda: 1
self.add_parameter("s", set_cmd=self.setter, get_cmd=self.getter)
self.add_parameter("g", set_cmd=setter, get_cmd=getter)
def setter(self, val):
print(val)
self.s = val
def getter(self):
return self.s
def free_function(self):
print("i'm free")
dummy = QcodesAdapter(MockQcodesDriver, "dummy_inst")
dummy.connect()
snapshot = dummy.snapshot(True)
print(snapshot)
assert len(snapshot) > 0
| 33.59375 | 85 | 0.616744 |
f7188fdf67798b6b524e83b8873542b335c4a5b4 | 8,649 | py | Python | acq4/devices/PatchStar/patchstar.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | acq4/devices/PatchStar/patchstar.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | acq4/devices/PatchStar/patchstar.py | tropp/ACQ4 | 792e05e99cedfc175593d200aeabecd6fa6304ce | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import numpy as np
from PyQt4 import QtGui, QtCore
from ..Stage import Stage, MoveFuture, StageInterface
from acq4.drivers.PatchStar import PatchStar as PatchStarDriver
from acq4.util.Mutex import Mutex
from acq4.util.Thread import Thread
from acq4.pyqtgraph import debug, ptime, SpinBox
class PatchStar(Stage):
"""
A Scientifica PatchStar manipulator.
port: <serial port> # eg. 'COM1' or '/dev/ttyACM0'
"""
def __init__(self, man, config, name):
self.port = config.pop('port')
self.scale = config.pop('scale', (1e-7, 1e-7, 1e-7))
self.dev = PatchStarDriver(self.port)
self._lastMove = None
man.sigAbortAll.connect(self.stop)
Stage.__init__(self, man, config, name)
# clear cached position for this device and re-read to generate an initial position update
self._lastPos = None
self.getPosition(refresh=True)
self.setUserSpeed(3e-3)
# Set scaling for each axis
self.dev.send('UUX 6.4')
self.dev.send('UUY 6.4')
self.dev.send('UUZ 6.4')
# makes 1 roe turn == 1 second movement for any speed
self.dev.send('JS 200')
# Set approach angle
self.dev.send('ANGLE %f' % self.pitch)
self.dev.send('APPROACH 0')
# thread for polling position changes
self.monitor = MonitorThread(self)
self.monitor.start()
def capabilities(self):
"""Return a structure describing the capabilities of this device"""
if 'capabilities' in self.config:
return self.config['capabilities']
else:
return {
'getPos': (True, True, True),
'setPos': (True, True, True),
'limits': (False, False, False),
}
def stop(self):
"""Stop the manipulator immediately.
"""
with self.lock:
self.dev.stop()
if self._lastMove is not None:
self._lastMove._stopped()
self._lastMove = None
def setUserSpeed(self, v):
"""Set the speed of the rotary controller (m/turn).
"""
self.userSpeed = v
self.dev.setSpeed(v / self.scale[0])
def _getPosition(self):
# Called by superclass when user requests position refresh
with self.lock:
pos = self.dev.getPos()
pos = [pos[i] * self.scale[i] for i in (0, 1, 2)]
if pos != self._lastPos:
self._lastPos = pos
emit = True
else:
emit = False
if emit:
# don't emit signal while locked
self.posChanged(pos)
return pos
def targetPosition(self):
with self.lock:
if self._lastMove is None or self._lastMove.isDone():
return self.getPosition()
else:
return self._lastMove.targetPos
def quit(self):
self.monitor.stop()
Stage.quit(self)
def _move(self, abs, rel, speed, linear):
with self.lock:
if self._lastMove is not None and not self._lastMove.isDone():
self.stop()
pos = self._toAbsolutePosition(abs, rel)
self._lastMove = PatchStarMoveFuture(self, pos, speed, self.userSpeed)
return self._lastMove
def deviceInterface(self, win):
return PatchStarGUI(self, win)
class MonitorThread(Thread):
"""Thread to poll for manipulator position changes.
"""
def __init__(self, dev):
self.dev = dev
self.lock = Mutex(recursive=True)
self.stopped = False
self.interval = 0.3
Thread.__init__(self)
def start(self):
self.stopped = False
Thread.start(self)
def stop(self):
with self.lock:
self.stopped = True
def setInterval(self, i):
with self.lock:
self.interval = i
def run(self):
minInterval = 100e-3
interval = minInterval
lastPos = None
while True:
try:
with self.lock:
if self.stopped:
break
maxInterval = self.interval
pos = self.dev._getPosition() # this causes sigPositionChanged to be emitted
if pos != lastPos:
# if there was a change, then loop more rapidly for a short time.
interval = minInterval
lastPos = pos
else:
interval = min(maxInterval, interval*2)
time.sleep(interval)
except:
debug.printExc('Error in PatchStar monitor thread:')
time.sleep(maxInterval)
class PatchStarMoveFuture(MoveFuture):
"""Provides access to a move-in-progress on a PatchStar manipulator.
"""
def __init__(self, dev, pos, speed, userSpeed):
MoveFuture.__init__(self, dev, pos, speed)
self._interrupted = False
self._errorMSg = None
self._finished = False
pos = (np.array(pos) / np.array(self.dev.scale)).astype(int)
if speed == 'fast':
speed = 1e-3
elif speed == 'slow':
speed = 1e-6
with self.dev.dev.lock:
self.dev.dev.moveTo(pos, speed / self.dev.scale[0])
# reset to user speed immediately after starting move
# (the move itself will run with the previous speed)
self.dev.dev.setSpeed(userSpeed / self.dev.scale[0])
def wasInterrupted(self):
"""Return True if the move was interrupted before completing.
"""
return self._interrupted
def isDone(self):
"""Return True if the move is complete.
"""
return self._getStatus() != 0
def _getStatus(self):
# check status of move unless we already know it is complete.
# 0: still moving; 1: finished successfully; -1: finished unsuccessfully
if self._finished:
if self._interrupted:
return -1
else:
return 1
if self.dev.dev.isMoving():
# Still moving
return 0
# did we reach target?
pos = self.dev._getPosition()
if ((np.array(pos) - np.array(self.targetPos))**2).sum()**0.5 < 1e-6:
# reached target
self._finished = True
return 1
else:
# missed
self._finished = True
self._interrupted = True
self._errorMsg = "Move did not complete."
return -1
def _stopped(self):
# Called when the manipulator is stopped, possibly interrupting this move.
status = self._getStatus()
if status == 1:
# finished; ignore stop
return
elif status == -1:
self._errorMsg = "Move was interrupted before completion."
elif status == 0:
# not actually stopped! This should not happen.
raise RuntimeError("Interrupted move but manipulator is still running!")
else:
raise Exception("Unknown status: %s" % status)
def errorMessage(self):
return self._errorMsg
class PatchStarGUI(StageInterface):
def __init__(self, dev, win):
StageInterface.__init__(self, dev, win)
# Insert patchstar-specific controls into GUI
self.psGroup = QtGui.QGroupBox('PatchStar Rotary Controller')
self.layout.addWidget(self.psGroup, self.nextRow, 0, 1, 2)
self.nextRow += 1
self.psLayout = QtGui.QGridLayout()
self.psGroup.setLayout(self.psLayout)
self.speedLabel = QtGui.QLabel('Speed')
self.speedSpin = SpinBox(value=self.dev.userSpeed, suffix='m/turn', siPrefix=True, dec=True, limits=[1e-6, 10e-3])
self.revXBtn = QtGui.QPushButton('Reverse X')
self.revYBtn = QtGui.QPushButton('Reverse Y')
self.revZBtn = QtGui.QPushButton('Reverse Z')
self.psLayout.addWidget(self.speedLabel, 0, 0)
self.psLayout.addWidget(self.speedSpin, 0, 1)
self.psLayout.addWidget(self.revXBtn, 1, 1)
self.psLayout.addWidget(self.revYBtn, 2, 1)
self.psLayout.addWidget(self.revZBtn, 3, 1)
self.revXBtn.clicked.connect(lambda: self.dev.dev.send('JDX'))
self.revYBtn.clicked.connect(lambda: self.dev.dev.send('JDY'))
self.revZBtn.clicked.connect(lambda: self.dev.dev.send('JDZ'))
self.speedSpin.valueChanged.connect(lambda v: self.dev.setDefaultSpeed(v))
| 33.01145 | 122 | 0.575327 |
f71898c3ed083524faabeea56c687bae2ca86d8e | 807 | py | Python | src/python/pants/backend/python/rules/setup_py_util_test.py | mpopenko-exos/pants | 47d27037c8b13291fc9023e56ddd1b1defdf1b8e | [
"Apache-2.0"
] | null | null | null | src/python/pants/backend/python/rules/setup_py_util_test.py | mpopenko-exos/pants | 47d27037c8b13291fc9023e56ddd1b1defdf1b8e | [
"Apache-2.0"
] | 1 | 2018-09-04T17:37:34.000Z | 2018-09-04T19:42:58.000Z | src/python/pants/backend/python/rules/setup_py_util_test.py | mpopenko-exos/pants | 47d27037c8b13291fc9023e56ddd1b1defdf1b8e | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.rules.setup_py_util import distutils_repr
testdata = {
'foo': 'bar',
'baz': {
'qux': [123, 456],
'quux': ('abc', b'xyz'),
'corge': {1, 2, 3}
},
'various_strings': [
"x'y",
'aaa\nbbb'
]
}
expected = """
{
'foo': 'bar',
'baz': {
'qux': [
123,
456,
],
'quux': (
'abc',
'xyz',
),
'corge': {
1,
2,
3,
},
},
'various_strings': [
'x\\\'y',
\"\"\"aaa\nbbb\"\"\",
],
}
""".strip()
def test_distutils_repr():
assert expected == distutils_repr(testdata)
| 16.469388 | 67 | 0.448575 |
f718a3bfab465e69af8eecdfc4731de81a8437f6 | 3,893 | py | Python | deletionwatcher.py | Floern/SmokeDetector | 2818bbd23af15440836c61c4023d063264433c66 | [
"Apache-2.0",
"MIT"
] | null | null | null | deletionwatcher.py | Floern/SmokeDetector | 2818bbd23af15440836c61c4023d063264433c66 | [
"Apache-2.0",
"MIT"
] | null | null | null | deletionwatcher.py | Floern/SmokeDetector | 2818bbd23af15440836c61c4023d063264433c66 | [
"Apache-2.0",
"MIT"
] | 1 | 2018-10-11T13:41:49.000Z | 2018-10-11T13:41:49.000Z | # coding=utf-8
import json
import requests
import time
# noinspection PyPackageRequirements
import websocket
# noinspection PyPackageRequirements
from bs4 import BeautifulSoup
from threading import Thread
from urllib.parse import urlparse
import metasmoke
from globalvars import GlobalVars
import datahandling
# noinspection PyClassHasNoInit,PyBroadException,PyMethodParameters
class DeletionWatcher:
@classmethod
def update_site_id_list(self):
soup = BeautifulSoup(requests.get("https://meta.stackexchange.com/topbar/site-switcher/site-list").text,
"html.parser")
site_id_dict = {}
for site in soup.findAll("a", attrs={"data-id": True}):
site_name = urlparse(site["href"]).netloc
site_id = site["data-id"]
site_id_dict[site_name] = site_id
GlobalVars.site_id_dict = site_id_dict
@classmethod
def check_websocket_for_deletion(self, post_site_id, post_url, timeout):
time_to_check = time.time() + timeout
post_id = post_site_id[0]
post_type = post_site_id[2]
if post_type == "answer":
question_id = str(datahandling.get_post_site_id_link(post_site_id))
if question_id is None:
return
else:
question_id = post_id
post_site = post_site_id[1]
if post_site not in GlobalVars.site_id_dict:
return
site_id = GlobalVars.site_id_dict[post_site]
ws = websocket.create_connection("wss://qa.sockets.stackexchange.com/")
ws.send(site_id + "-question-" + question_id)
while time.time() < time_to_check:
ws.settimeout(time_to_check - time.time())
try:
a = ws.recv()
except websocket.WebSocketTimeoutException:
t_metasmoke = Thread(name="metasmoke send deletion stats",
target=metasmoke.Metasmoke.send_deletion_stats_for_post, args=(post_url, False))
t_metasmoke.start()
return False
if a is not None and a != "":
try:
action = json.loads(a)["action"]
if action == "hb":
ws.send("hb")
continue
else:
d = json.loads(json.loads(a)["data"])
except:
continue
if d["a"] == "post-deleted" and str(d["qId"]) == question_id \
and ((post_type == "answer" and "aId" in d and str(d["aId"]) == post_id) or
post_type == "question"):
t_metasmoke = Thread(name="metasmoke send deletion stats",
target=metasmoke.Metasmoke.send_deletion_stats_for_post, args=(post_url, True))
t_metasmoke.start()
return True
t_metasmoke = Thread(name="metasmoke send deletion stats",
target=metasmoke.Metasmoke.send_deletion_stats_for_post, args=(post_url, False))
t_metasmoke.start()
return False
@classmethod
def check_if_report_was_deleted(self, post_site_id, post_url, message):
was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 1200)
if was_report_deleted:
try:
message.delete()
except:
pass
@classmethod
def post_message_if_not_deleted(self, post_site_id, post_url, message_text, room):
was_report_deleted = self.check_websocket_for_deletion(post_site_id, post_url, 300)
if not was_report_deleted and not datahandling.is_false_positive(post_site_id[0:2]) and not \
datahandling.is_ignored_post(post_site_id[0:2]):
room.send_message(message_text)
| 40.552083 | 120 | 0.599281 |
f718be3ab1e8857e704777f735842ba57cdcf3f2 | 27,925 | py | Python | sdk/communication/azure-communication-chat/tests/test_chat_thread_client_async.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/communication/azure-communication-chat/tests/test_chat_thread_client_async.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/communication/azure-communication-chat/tests/test_chat_thread_client_async.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from azure.core.credentials import AccessToken
from datetime import datetime
from msrest.serialization import TZ_UTC
from azure.communication.chat.aio import ChatThreadClient
from azure.communication.chat import (
ChatParticipant,
ChatMessageType
)
from azure.communication.chat._shared.models import(
CommunicationUserIdentifier
)
from unittest_helpers import mock_response
from azure.core.exceptions import HttpResponseError
from unittest.mock import Mock, patch
import pytest
import time
import calendar
def _convert_datetime_to_utc_int(input):
return int(calendar.timegm(input.utctimetuple()))
async def mock_get_token():
return AccessToken("some_token", _convert_datetime_to_utc_int(datetime.now().replace(tzinfo=TZ_UTC)))
credential = Mock(get_token=mock_get_token)
@pytest.mark.asyncio
async def test_update_topic():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
topic = "update topic"
try:
await chat_thread_client.update_topic(topic=topic)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_message():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={"id": message_id})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
create_message_result_id = None
try:
content='hello world'
sender_display_name='sender name'
metadata={ "tags": "tag" }
create_message_result = await chat_thread_client.send_message(
content,
sender_display_name=sender_display_name,
metadata=metadata)
create_message_result_id = create_message_result.id
except:
raised = True
assert raised == False
assert create_message_result_id == message_id
@pytest.mark.asyncio
async def test_send_message_w_type():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
message_str = "Hi I am Bob."
create_message_result_id = None
chat_message_types = [ChatMessageType.TEXT, ChatMessageType.HTML, "text", "html"]
for chat_message_type in chat_message_types:
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={
"id": message_id,
"type": chat_message_type,
"sequenceId": "3",
"version": message_id,
"content": {
"message": message_str,
"topic": "Lunch Chat thread",
"participants": [
{
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b",
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiator": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderId": "8:acs:46849534-eb08-4ab7-bde7-c36928cd1547_00000007-e155-1f06-1db7-3a3a0d00004b"
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
content='hello world'
sender_display_name='sender name'
create_message_result = await chat_thread_client.send_message(
content,
chat_message_type=chat_message_type,
sender_display_name=sender_display_name)
create_message_result_id = create_message_result.id
except:
raised = True
assert raised == False
assert create_message_result_id == message_id
@pytest.mark.asyncio
async def test_send_message_w_invalid_type_throws_error():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
# the payload is irrelevant - it'll fail before
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={"id": message_id})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
create_message_result_id = None
chat_message_types = [ChatMessageType.PARTICIPANT_ADDED, ChatMessageType.PARTICIPANT_REMOVED,
ChatMessageType.TOPIC_UPDATED, "participant_added", "participant_removed", "topic_updated",
"ChatMessageType.TEXT", "ChatMessageType.HTML",
"ChatMessageType.PARTICIPANT_ADDED", "ChatMessageType.PARTICIPANT_REMOVED",
"ChatMessageType.TOPIC_UPDATED"]
for chat_message_type in chat_message_types:
try:
content='hello world'
sender_display_name='sender name'
create_message_result = await chat_thread_client.send_message(
content,
chat_message_type=chat_message_type,
sender_display_name=sender_display_name)
except:
raised = True
assert raised == True
@pytest.mark.asyncio
async def test_get_message():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
message_str = "Hi I am Bob."
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"id": message_id,
"type": "text",
"sequenceId": "3",
"version": message_id,
"content": {
"message": message_str,
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z",
"metadata": {
"tags": "tag"
}
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
message = None
try:
message = await chat_thread_client.get_message(message_id)
except:
raised = True
assert raised == False
assert message.id == message_id
assert message.type == ChatMessageType.TEXT
assert message.content.message == message_str
assert message.metadata["tags"] == "tag"
assert len(message.content.participants) > 0
@pytest.mark.asyncio
async def test_list_messages():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={"value": [{
"id": message_id,
"type": "text",
"sequenceId": "3",
"version": message_id,
"content": {
"message": "message_str",
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z"
}]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_messages = None
try:
chat_messages = chat_thread_client.list_messages(results_per_page=1)
except:
raised = True
assert raised == False
items = []
async for item in chat_messages:
items.append(item)
assert len(items) == 1
assert items[0].id == message_id
@pytest.mark.asyncio
async def test_list_messages_with_start_time():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"id": "message_id_1",
"type": "text",
"sequenceId": "3",
"version": "message_id_1",
"content": {
"message": "message_str",
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z"
},
{
"id": "message_id_2",
"type": "text",
"sequenceId": "3",
"version": "message_id_2",
"content": {
"message": "message_str",
"topic": "Lunch Chat thread",
"participants": [
{
"communicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
],
"initiatorCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}}
},
"senderDisplayName": "Bob",
"createdOn": "2021-01-27T01:37:33Z",
"senderCommunicationIdentifier": {"rawId": "string", "communicationUser": {
"id": "8:acs:8540c0de-899f-5cce-acb5-3ec493af3800_0e59221d-0c1d-46ae-9544-c963ce56c10b"}},
"deletedOn": "2021-01-27T01:37:33Z",
"editedOn": "2021-01-27T01:37:33Z"
}]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_messages = None
try:
chat_messages = chat_thread_client.list_messages(
start_time=datetime(2020, 8, 17, 18, 0, 0)
)
except:
raised = True
assert raised == False
items = []
async for item in chat_messages:
items.append(item)
assert len(items) == 2
@pytest.mark.asyncio
async def test_update_message_content():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
content = "updated message content"
await chat_thread_client.update_message(message_id, content=content)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_update_message_metadata():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
metadata={ "tags": "tag" }
await chat_thread_client.update_message(message_id, metadata=metadata)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_delete_message():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id='1596823919339'
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.delete_message(message_id)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_list_participants():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={"value": [
{
"communicationIdentifier": {
"rawId": participant_id,
"communicationUser": {
"id": participant_id
}
},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_thread_participants = None
try:
chat_thread_participants = chat_thread_client.list_participants()
except:
raised = True
assert raised == False
items = []
async for item in chat_thread_participants:
items.append(item)
assert len(items) == 1
@pytest.mark.asyncio
async def test_list_participants_with_results_per_page():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
participant_id_1 = "8:acs:9b665d53-8164-4923-ad5d-5e983b07d2e7_00000006-5399-552c-b274-5a3a0d0000dc"
participant_id_2 = "8:acs:9b665d53-8164-4923-ad5d-5e983b07d2e7_00000006-9d32-35c9-557d-5a3a0d0002f1"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"communicationIdentifier": {
"rawId": participant_id_1,
"communicationUser": {
"id": participant_id_1
}
},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
},
{
"communicationIdentifier": {
"rawId": participant_id_2,
"communicationUser": {
"id": participant_id_2
}
},
"displayName": "Bob",
"shareHistoryTime": "2020-10-30T10:50:50Z"
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
chat_thread_participants = None
try:
chat_thread_participants = chat_thread_client.list_participants(results_per_page=2)
except:
raised = True
assert raised == False
items = []
async for item in chat_thread_participants:
items.append(item)
assert len(items) == 2
@pytest.mark.asyncio
async def test_add_participants():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
new_participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=201)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
new_participant = ChatParticipant(
identifier=CommunicationUserIdentifier(new_participant_id),
display_name='name',
share_history_time=datetime.utcnow())
participants = [new_participant]
try:
await chat_thread_client.add_participants(participants)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_add_participants_w_failed_participants_returns_nonempty_list():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
new_participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
error_message = "some error message"
async def mock_send(*_, **__):
return mock_response(status_code=201, json_payload={
"invalidParticipants": [
{
"code": "string",
"message": error_message,
"target": new_participant_id,
"details": []
}
]
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
new_participant = ChatParticipant(
identifier=CommunicationUserIdentifier(new_participant_id),
display_name='name',
share_history_time=datetime.utcnow())
participants = [new_participant]
try:
result = await chat_thread_client.add_participants(participants)
except:
raised = True
assert raised == False
assert len(result) == 1
failed_participant = result[0][0]
communication_error = result[0][1]
assert new_participant.identifier.properties['id'] == failed_participant.identifier.properties['id']
assert new_participant.display_name == failed_participant.display_name
assert new_participant.share_history_time == failed_participant.share_history_time
assert error_message == communication_error.message
@pytest.mark.asyncio
async def test_remove_participant():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
participant_id="8:acs:57b9bac9-df6c-4d39-a73b-26e944adf6ea_9b0110-08007f1041"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=204)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.remove_participant(identifier=CommunicationUserIdentifier(participant_id))
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_typing_notification():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.send_typing_notification()
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_typing_notification_with_sender_display_name():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.send_typing_notification(sender_display_name="John")
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_send_read_receipt():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id="1596823919339"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200)
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
try:
await chat_thread_client.send_read_receipt(message_id)
except:
raised = True
assert raised == False
@pytest.mark.asyncio
async def test_list_read_receipts():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id="1596823919339"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={"value": [
{
"chatMessageId": message_id,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
read_receipts = None
try:
read_receipts = chat_thread_client.list_read_receipts()
except:
raised = True
assert raised == False
items = []
async for item in read_receipts:
items.append(item)
assert len(items) == 1
@pytest.mark.asyncio
async def test_list_read_receipts_with_results_per_page():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id_1 = "1596823919339"
message_id_2 = "1596823919340"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"chatMessageId": message_id_1,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
},
{
"chatMessageId": message_id_2,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
read_receipts = None
try:
read_receipts = chat_thread_client.list_read_receipts(results_per_page=2)
except:
raised = True
assert raised == False
items = []
async for item in read_receipts:
items.append(item)
assert len(items) == 2
@pytest.mark.asyncio
async def test_list_read_receipts_with_results_per_page_and_skip():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
message_id_1 = "1596823919339"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"value": [
{
"chatMessageId": message_id_1,
"senderCommunicationIdentifier": {
"rawId": "string",
"communicationUser": {
"id": "string"
}
}
}
]})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
read_receipts = None
try:
read_receipts = chat_thread_client.list_read_receipts(results_per_page=1, skip=1)
except:
raised = True
assert raised == False
items = []
async for item in read_receipts:
items.append(item)
assert len(items) == 1
@pytest.mark.asyncio
async def test_get_properties():
thread_id = "19:bcaebfba0d314c2aa3e920d38fa3df08@thread.v2"
raised = False
async def mock_send(*_, **__):
return mock_response(status_code=200, json_payload={
"id": thread_id,
"topic": "Lunch Chat thread",
"createdOn": "2020-10-30T10:50:50Z",
"deletedOn": "2020-10-30T10:50:50Z",
"createdByCommunicationIdentifier": {"rawId": "string", "communicationUser": {"id": "string"}}
})
chat_thread_client = ChatThreadClient("https://endpoint", credential, thread_id, transport=Mock(send=mock_send))
get_thread_result = None
try:
get_thread_result = await chat_thread_client.get_properties()
except:
raised = True
assert raised == False
assert get_thread_result.id == thread_id
| 37.134309 | 126 | 0.582775 |
f718d25705647be878e1d7696bde13ea0e8f11b5 | 138 | py | Python | testproj/testproj/testapp/admin.py | Polyconseil/django-select2-rocks | 0cc29af55cdd7bec7da773966bec0da84fa7aa6c | [
"BSD-2-Clause"
] | 6 | 2015-09-03T09:01:46.000Z | 2021-01-28T20:15:18.000Z | testproj/testproj/testapp/admin.py | Polyconseil/django-select2-rocks | 0cc29af55cdd7bec7da773966bec0da84fa7aa6c | [
"BSD-2-Clause"
] | 7 | 2015-06-04T14:48:20.000Z | 2018-02-28T09:53:03.000Z | testproj/testproj/testapp/admin.py | Polyconseil/django-select2-rocks | 0cc29af55cdd7bec7da773966bec0da84fa7aa6c | [
"BSD-2-Clause"
] | 3 | 2015-04-05T14:20:10.000Z | 2016-09-30T17:02:01.000Z | from django.contrib import admin
from .models import Beach, SelectedBeach
admin.site.register(Beach)
admin.site.register(SelectedBeach)
| 19.714286 | 40 | 0.826087 |
f718ea668f086785a2ecdd071fd77637573089f4 | 53,142 | py | Python | discretisedfield/field.py | minrk/discretisedfield | 251584f8d976a7fafdff5402d16327489407c4dd | [
"BSD-3-Clause"
] | null | null | null | discretisedfield/field.py | minrk/discretisedfield | 251584f8d976a7fafdff5402d16327489407c4dd | [
"BSD-3-Clause"
] | null | null | null | discretisedfield/field.py | minrk/discretisedfield | 251584f8d976a7fafdff5402d16327489407c4dd | [
"BSD-3-Clause"
] | null | null | null | import pyvtk
import struct
import matplotlib
import numpy as np
import mpl_toolkits.axes_grid1
import discretisedfield as df
import ubermagutil.typesystem as ts
import discretisedfield.util as dfu
import matplotlib.pyplot as plt
@ts.typesystem(mesh=ts.Typed(expected_type=df.Mesh),
dim=ts.Scalar(expected_type=int, unsigned=True, const=True),
name=ts.Name(const=True))
class Field:
"""Finite difference field.
This class defines a finite difference field and enables certain
operations for its analysis and visualisation. The field is
defined on a finite difference mesh (`discretisedfield.Mesh`).
Parameters
----------
mesh : discretisedfield.Mesh
Finite difference rectangular mesh.
dim : int, optional
Dimension of the field value. For instance, if `dim=3` the
field is a three-dimensional vector field and for `dim=1`
the field is a scalar field. Defaults to `dim=3`.
value : array_like, callable, optional
Please refer to the `value` property:
:py:func:`~discretisedfield.Field.value`. Defaults to 0,
meaning that if the value is not provided in the
initialisation process, "zero-field" will be defined.
norm : numbers.Real, callable, optional
Please refer to the `norm` property:
:py:func:`~discretisedfield.Field.norm`. Defaults to `None`
(`norm=None` defines no norm).
name : str, optional
Field name (defaults to `'field'`). The field name must be a
valid Python variable name string. More specifically, it must
not contain spaces, or start with underscore or numeric
character.
Examples
--------
1. Creating a uniform three-dimensional vector field on a
nano-sized thin film.
>>> import discretisedfield as df
...
>>> p1 = (-50e-9, -25e-9, 0)
>>> p2 = (50e-9, 25e-9, 5e-9)
>>> cell = (1e-9, 1e-9, 0.1e-9)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
...
>>> dim = 3
>>> value = (0, 0, 1)
>>> field = df.Field(mesh=mesh, dim=dim, value=value)
>>> field
Field(mesh=...)
2. Creating a scalar field.
>>> import discretisedfield as df
...
>>> p1 = (-10, -10, -10)
>>> p2 = (10, 10, 10)
>>> n = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> dim = 1
>>> value = 3.14
>>> field = df.Field(mesh=mesh, dim=dim, value=value)
>>> field
Field(mesh=...)
.. seealso:: :py:func:`~discretisedfield.Mesh`
"""
def __init__(self, mesh, dim=3, value=0, norm=None, name='field'):
self.mesh = mesh
self.dim = dim
self.value = value
self.norm = norm
self.name = name
@property
def value(self):
"""Field value representation.
        This property returns the representation used to set the field
        value if it still matches the field's array. Otherwise, the
        `numpy.ndarray` containing all values from the field is returned.
Parameters
----------
value : 0, array_like, callable
For scalar fields (`dim=1`) `numbers.Real` values are
allowed. In the case of vector fields, "array_like" (list,
tuple, numpy.ndarray) value with length equal to `dim`
should be used. Finally, the value can also be a callable
(e.g. Python function or another field), which for every
coordinate in the mesh returns a valid value. If
`value=0`, all values in the field will be set to zero
independent of the field dimension.
Returns
-------
array_like, callable, numbers.Real
The value used (representation) for setting the field is
returned. However, if the actual value of the field does
not correspond to the initially used value anymore, a
`numpy.ndarray` is returned containing all field values.
Raises
------
ValueError
If unsupported type is passed
Examples
--------
1. Different ways of setting and getting the field value.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> value = (0, 0, 1)
>>> # if value is not specified, zero-field is defined
>>> field = df.Field(mesh=mesh, dim=3)
>>> field.value
0
>>> field.value = (0, 0, 1)
>>> field.value
(0, 0, 1)
>>> # Setting the field value using a Python function (callable).
>>> def value_function(pos):
... x, y, z = pos
... if x <= 1:
... return (0, 0, 1)
... else:
... return (0, 0, -1)
>>> field.value = value_function
>>> field.value
<function value_function at ...>
>>> # We now change the value of a single cell so that the
>>> # representation used for initialising field is not valid
>>> # anymore.
>>> field.array[0, 0, 0, :] = (0, 0, 0)
>>> field.value
array(...)
.. seealso:: :py:func:`~discretisedfield.Field.array`
"""
value_array = dfu.as_array(self.mesh, self.dim, self._value)
if np.array_equal(self.array, value_array):
return self._value
else:
return self.array
@value.setter
def value(self, val):
self._value = val
self.array = dfu.as_array(self.mesh, self.dim, val)
@property
def array(self):
"""Numpy array of a field value.
`array` has shape of `(self.mesh.n[0], self.mesh.n[1],
self.mesh.n[2], dim)`.
Parameters
----------
array : numpy.ndarray
Numpy array with dimensions `(self.mesh.n[0],
self.mesh.n[1], self.mesh.n[2], dim)`
Returns
-------
numpy.ndarray
Field values array.
Raises
------
ValueError
If setting the array with wrong type, shape, or value.
Examples
--------
1. Accessing and setting the field array.
>>> import discretisedfield as df
>>> import numpy as np
...
>>> p1 = (0, 0, 0)
>>> p2 = (1, 1, 1)
>>> cell = (0.5, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> value = (0, 0, 1)
>>> field = df.Field(mesh=mesh, dim=3, value=value)
>>> field.array
array(...)
>>> field.array.shape
(2, 1, 1, 3)
>>> field.array = np.ones(field.array.shape)
>>> field.array
array(...)
.. seealso:: :py:func:`~discretisedfield.Field.value`
"""
return self._array
@array.setter
def array(self, val):
if isinstance(val, np.ndarray) and \
val.shape == self.mesh.n + (self.dim,):
self._array = val
else:
msg = (f'Unsupported type(val)={type(val)} '
'or invalid value dimensions.')
raise ValueError(msg)
@property
def norm(self):
"""Norm of a field.
This property computes the norm of the field and returns it as
        a `discretisedfield.Field` object with `dim=1`. The norm of a
        scalar field cannot be set; attempting to do so raises
        `ValueError`.
Parameters
----------
numbers.Real, numpy.ndarray
Norm value
Returns
-------
discretisedfield.Field
Scalar field with norm values.
Raises
------
ValueError
If setting the norm with wrong type, shape, or value. In
addition, if the field is scalar (dim=1) or it contains
zero vector values.
Examples
--------
1. Manipulating the field norm
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (1, 1, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))
>>> field.norm
Field(...)
>>> field.norm = 2
>>> field.norm
Field(...)
>>> field.value = (1, 0, 0)
>>> field.norm.array
array([[[[1.]]]])
"""
current_norm = np.linalg.norm(self.array, axis=-1)[..., None]
return Field(self.mesh, dim=1, value=current_norm, name='norm')
@norm.setter
def norm(self, val):
if val is not None:
if self.dim == 1:
msg = f'Cannot set norm for field with dim={self.dim}.'
raise ValueError(msg)
if not np.all(self.norm.array):
msg = 'Cannot normalise field with zero values.'
raise ValueError(msg)
self.array /= self.norm.array # normalise to 1
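            # Scale the now unit-norm field by the requested norm value(s).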
self.array *= dfu.as_array(self.mesh, dim=1, val=val)
@property
def average(self):
"""Field average.
It computes the average of the field over the entire volume of
the mesh.
Returns
-------
tuple
Field average tuple whose length equals to the field's
dimension.
Examples
--------
1. Computing the vector field average.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (5, 5, 5)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field1 = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))
>>> field1.average
(0.0, 0.0, 1.0)
>>> field2 = df.Field(mesh=mesh, dim=1, value=55)
>>> field2.average
(55.0,)
"""
return tuple(self.array.mean(axis=(0, 1, 2)))
def __repr__(self):
"""Field representation string.
This method returns the string that can ideally be copied in
another Python script so that exactly the same field object
could be defined. However, this is usually not the case due to
complex values used.
Returns
-------
str
Field representation string.
Example
-------
1. Getting field representation string.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=1, value=1)
>>> repr(field)
"Field(mesh=...)"
"""
return (f'Field(mesh={repr(self.mesh)}, '
f'dim={self.dim}, name=\'{self.name}\')')
def __call__(self, point):
"""Sample the field at `point`.
        It returns the value of the discretisation cell to which
        `point` belongs. For vector fields the value is returned as a
        tuple whose length equals the dimension of the field; for
        scalar fields a length-1 array is returned.
Parameters
----------
point : (3,) array_like
The mesh point coordinate :math:`(p_{x}, p_{y}, p_{z})`.
Returns
-------
        tuple or numpy.ndarray
            For vector fields, a tuple whose length is the same as the
            dimension of the field; for scalar fields, a length-1 array.
Example
-------
1. Sampling the field value
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (20, 20, 20)
>>> n = (20, 20, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 3, 4))
>>> point = (10, 2, 3)
>>> field(point)
(1.0, 3.0, 4.0)
"""
value = self.array[self.mesh.point2index(point)]
if self.dim > 1:
value = tuple(value)
return value
def __getattr__(self, name):
"""Extracting the component of the vector field.
If `'x'`, `'y'`, or `'z'` is accessed, a new scalar field of
that component will be returned. This method is effective for
vector fields with dimension 2 or 3.
Returns
-------
discretisedfield.Field
Scalar field with vector field component values.
Examples
--------
1. Accessing the vector field components.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 2)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))
>>> field.x
Field(...)
>>> field.y
Field(...)
>>> field.z
Field(...)
>>> field.z.dim
1
"""
if name in list(dfu.axesdict.keys())[:self.dim] and 1 < self.dim <= 3:
# Components x, y, and z make sense only for vector fields
# with typical dimensions 2 and 3.
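            # Slice out the requested component; `[..., None]` keeps a
            # trailing axis of length 1 so a dim=1 field can be built.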
component_array = self.array[..., dfu.axesdict[name]][..., None]
            fieldname = f'{self.name}-{name}'
return Field(mesh=self.mesh, dim=1,
value=component_array, name=fieldname)
else:
            msg = f'{type(self).__name__} object has no attribute {name}.'
            raise AttributeError(msg)
def __dir__(self):
"""Extension of the tab-completion list.
Adds `'x'`, `'y'`, and `'z'`, depending on the dimension of
the field, to the tab-completion list. This is effective in
IPython or Jupyter notebook environment.
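        Examples
        --------
        1. A minimal check of the added names (illustrative only; the
           full result of `dir()` also depends on the instance
           attributes).
        >>> import discretisedfield as df
        ...
        >>> p1 = (0, 0, 0)
        >>> p2 = (2, 2, 2)
        >>> cell = (1, 1, 1)
        >>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
        >>> field = df.Field(mesh=mesh, dim=3, value=(0, 0, 1))
        >>> all(c in dir(field) for c in ['x', 'y', 'z'])
        True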
"""
if 1 < self.dim <= 3:
extension = list(dfu.axesdict.keys())[:self.dim]
else:
extension = []
return list(self.__dict__.keys()) + extension
def __iter__(self):
"""Generator yielding coordinates and values of all field cells.
The discretisation cell coordinate corresponds to the cell
centre point.
Yields
------
tuple (2,)
The first value is the mesh cell coordinates (`px`, `py`,
`pz`), whereas the second one is the field value.
Examples
--------
1. Iterating through the field coordinates and values
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 1)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=3, value=(0, 0, 1))
>>> for coord, value in field:
... print (coord, value)
(0.5, 0.5, 0.5) (0.0, 0.0, 1.0)
(1.5, 0.5, 0.5) (0.0, 0.0, 1.0)
(0.5, 1.5, 0.5) (0.0, 0.0, 1.0)
(1.5, 1.5, 0.5) (0.0, 0.0, 1.0)
.. seealso:: :py:func:`~discretisedfield.Mesh.indices`
"""
for point in self.mesh.coordinates:
yield point, self.__call__(point)
def line(self, p1, p2, n=100):
"""Sampling the field along the line.
        Given two points :math:`p_{1}` and :math:`p_{2}`, :math:`n`
        position coordinates are generated along with the corresponding
        field values:
.. math::
\\mathbf{r}_{i} = i\\frac{\\mathbf{p}_{2} -
\\mathbf{p}_{1}}{n-1}
Parameters
----------
p1, p2 : (3,) array_like
Two points between which the line is generated.
n : int
Number of points on the line.
Yields
------
tuple
The first element is the coordinate of the point on the
line, whereas the second one is the value of the field.
Raises
------
ValueError
If `p1` or `p2` is outside the mesh domain.
Examples
--------
1. Sampling the field along the line.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 2)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=2, value=(0, 3))
>>> for coord, value in field.line(p1=(0, 0, 0), p2=(2, 0, 0), n=3):
... print(coord, value)
(0.0, 0.0, 0.0) (0.0, 3.0)
(1.0, 0.0, 0.0) (0.0, 3.0)
(2.0, 0.0, 0.0) (0.0, 3.0)
"""
for point in self.mesh.line(p1=p1, p2=p2, n=n):
yield point, self.__call__(point)
def plane(self, *args, n=None, **kwargs):
"""Slices the field with a plane.
If one of the axes (`'x'`, `'y'`, or `'z'`) is passed as a
string, a plane perpendicular to that axis is generated which
intersects the field at its centre. Alternatively, if a keyword
argument is passed (e.g. `x=1`), a plane perpendicular to the
x-axis and intersecting it at x=1 is generated. The number of
points in two dimensions on the plane can be defined using `n`
(e.g. `n=(10, 15)`). Using the generated plane, a new
"two-dimensional" field is created and returned.
Parameters
----------
n : tuple of length 2
            The number of points on the plane in two dimensions.
        Returns
        -------
        discretisedfield.Field
            A field obtained as an intersection of the mesh and the plane.
Example
-------
1. Intersecting the field with a plane.
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (2, 2, 2)
>>> cell = (1, 1, 1)
>>> mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
>>> field = df.Field(mesh, dim=3)
>>> field.plane(y=1)
Field(mesh=...)
"""
plane_mesh = self.mesh.plane(*args, n=n, **kwargs)
return self.__class__(plane_mesh, dim=self.dim, value=self)
def write(self, filename, representation='txt', extend_scalar=False):
"""Write the field in .ovf, .omf, .ohf, or vtk format.
If the extension of the `filename` is `.vtk`, a VTK file is
written
(:py:func:`~discretisedfield.Field._writevtk`). Otherwise, for
`.ovf`, `.omf`, or `.ohf` extensions, an OOMMF file is written
(:py:func:`~discretisedfield.Field._writeovf`). The
representation (`bin4`, 'bin8', or 'txt') is passed using
`representation` argument.
Parameters
----------
filename : str
Name of the file written. It depends on its extension the
format it is going to be written as.
representation : str
In the case of OOMMF files (`.ovf`, `.omf`, or `.ohf`),
representation can be specified (`bin4`, `bin8`, or
`txt`). Defaults to 'txt'.
extend_scalar : bool
If True, a scalar field will be saved as a vector
field. More precisely, if the value at a cell is 3, that
cell will be saved as (3, 0, 0). This is valid only for
the OVF file formats.
Example
-------
1. Write an .omf file and delete it from the disk
>>> import os
>>> import discretisedfield as df
...
>>> p1 = (0, 0, -5)
>>> p2 = (5, 15, 15)
>>> n = (5, 15, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, value=(5, 6, 7))
>>> filename = 'mytestfile.omf'
>>> field.write(filename) # write the file
>>> os.remove(filename) # delete the file
.. seealso:: :py:func:`~discretisedfield.Field.fromfile`
"""
if any([filename.endswith(ext) for ext in ['.omf', '.ovf', '.ohf']]):
self._writeovf(filename, representation=representation,
extend_scalar=extend_scalar)
elif filename.endswith('.vtk'):
self._writevtk(filename)
else:
msg = ('Allowed extensions for writing the field are '
'.omf, .ovf, .ohf, and .vtk.')
raise ValueError(msg)
def _writeovf(self, filename, representation='txt', extend_scalar=False):
"""Write the field in .ovf, .omf, or .ohf format.
The extension of the `filename` should be `.ovf`, `.omf`, or
`.ohf`. The representation (`bin4`, `bin8`, or `txt`) is
passed using `representation` argument.
Parameters
----------
filename : str
Name of the file written.
representation : str
Representation of the file (`bin4`, `bin8`, or
`txt`). Defaults to 'txt'.
extend_scalar : bool
If True, a scalar field will be saved as a vector
field. More precisely, if the value at a cell is 3, that
cell will be saved as (3, 0, 0).
Example
-------
1. Write an .omf file and delete it from the disk
>>> import os
>>> import discretisedfield as df
...
>>> p1 = (0, 0, -5)
>>> p2 = (5, 15, 15)
>>> n = (5, 15, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, value=(5, 6, 7))
>>> filename = 'mytestfile.omf'
>>> field._writeovf(filename) # write the file
>>> os.remove(filename) # delete the file
"""
if extend_scalar and self.dim == 1:
write_dim = 3
else:
write_dim = self.dim
header = ['OOMMF OVF 2.0',
'',
'Segment count: 1',
'',
'Begin: Segment',
'Begin: Header',
'',
'Title: Field generated omf file',
'Desc: File generated by Field class',
'meshunit: m',
'meshtype: rectangular',
f'xbase: {self.mesh.pmin[0] + self.mesh.cell[0]/2}',
f'ybase: {self.mesh.pmin[1] + self.mesh.cell[1]/2}',
f'zbase: {self.mesh.pmin[2] + self.mesh.cell[2]/2}',
f'xnodes: {self.mesh.n[0]}',
f'ynodes: {self.mesh.n[1]}',
f'znodes: {self.mesh.n[2]}',
f'xstepsize: {self.mesh.cell[0]}',
f'ystepsize: {self.mesh.cell[1]}',
f'zstepsize: {self.mesh.cell[2]}',
f'xmin: {self.mesh.pmin[0]}',
f'ymin: {self.mesh.pmin[1]}',
f'zmin: {self.mesh.pmin[2]}',
f'xmax: {self.mesh.pmax[0]}',
f'ymax: {self.mesh.pmax[1]}',
f'zmax: {self.mesh.pmax[2]}',
f'valuedim: {write_dim}',
f'valuelabels: {self.name}_x {self.name}_y {self.name}_z',
'valueunits: A/m A/m A/m',
'',
'End: Header',
'']
if representation == 'bin4':
header.append('Begin: Data Binary 4')
footer = ['End: Data Binary 4',
'End: Segment']
elif representation == 'bin8':
header.append('Begin: Data Binary 8')
footer = ['End: Data Binary 8',
'End: Segment']
elif representation == 'txt':
header.append('Begin: Data Text')
footer = ['End: Data Text',
'End: Segment']
# Write header lines to the ovf file.
f = open(filename, 'w')
f.write(''.join(map(lambda line: f'# {line}\n', header)))
f.close()
binary_reps = {'bin4': (1234567.0, 'f'),
'bin8': (123456789012345.0, 'd')}
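# Note: 1234567.0 (4-byte) and 123456789012345.0 (8-byte) are the standard OVF binary
# check values; they are written as the first data item so a reader can verify byte
# order and precision, which is exactly what `fromfile` checks when reading back.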
if representation in binary_reps:
# Reopen the file with binary write, appending to the end
# of the file.
f = open(filename, 'ab')
# Add the 8 bit binary check value that OOMMF uses.
packarray = [binary_reps[representation][0]]
# Write data to the ovf file.
for i in self.mesh.indices:
for vi in self.array[i]:
packarray.append(vi)
v_bin = struct.pack(binary_reps[representation][1]*len(packarray),
*packarray)
f.write(v_bin)
f.close()
else:
# Reopen the file for txt representation, appending to the
# file.
f = open(filename, 'a')
for i in self.mesh.indices:
if self.dim == 3:
v = [vi for vi in self.array[i]]
elif self.dim == 1:
if extend_scalar:
v = [self.array[i][0], 0.0, 0.0]
else:
v = [self.array[i][0]]
else:
msg = (f'Cannot write dim={self.dim} field.')
raise TypeError(msg)
for vi in v:
f.write(' ' + str(vi))
f.write('\n')
f.close()
# Write footer lines to OOMMF file.
f = open(filename, 'a')
f.write(''.join(map(lambda line: f'# {line}\n', footer)))
f.close()
def _writevtk(self, filename):
"""Write the field in the VTK format.
The extension of the `filename` should be `.vtk`.
Parameters
----------
filename : str
Name of the file written.
Example
-------
1. Write a .vtk file and delete it from the disk
>>> import os
>>> import discretisedfield as df
...
>>> p1 = (0, 0, -5)
>>> p2 = (5, 15, 15)
>>> n = (5, 15, 20)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, value=(5, 6, 7))
>>> filename = 'mytestfile.vtk'
>>> field._writevtk(filename) # write the file
>>> os.remove(filename) # delete the file
"""
grid = [pmini + np.linspace(0, li, ni+1) for pmini, li, ni in
zip(self.mesh.pmin, self.mesh.l, self.mesh.n)]
structure = pyvtk.RectilinearGrid(*grid)
vtkdata = pyvtk.VtkData(structure)
vectors = [self.__call__(coord) for coord in self.mesh.coordinates]
vtkdata.cell_data.append(pyvtk.Vectors(vectors, self.name))
for i, component in enumerate(dfu.axesdict.keys()):
name = f'{self.name}_{component}'
vtkdata.cell_data.append(pyvtk.Scalars(list(zip(*vectors))[i],
name))
vtkdata.tofile(filename)
@classmethod
def fromfile(cls, filename, norm=None, name='field'):
"""Read the field from .ovf, .omf, or .ohf file.
The extension of the `filename` should be `.ovf`, `.omf`, or
`.ohf`. If the field should be normalised, `norm` argument can
be passed. The `name` of the field defaults to `'field'`. This
is a `classmethod` and should be called as
`discretisedfield.Field.fromfile('myfile.omf')`.
Parameters
----------
filename : str
Name of the file to be read.
norm : numbers.Real, numpy.ndarray, callable
For details, refer to :py:func:`~discretisedfield.Field.value`.
name : str
Name of the field read.
Returns
-------
discretisedfield.Field
Example
-------
1. Read a field from the .ovf file
>>> import os
>>> import discretisedfield as df
...
>>> ovffile = os.path.join(os.path.dirname(__file__),
... 'tests', 'test_sample',
... 'mumax-output-linux.ovf')
>>> field = df.Field.fromfile(ovffile)
>>> field
Field(mesh=...)
.. seealso:: :py:func:`~discretisedfield.Field.write`
"""
mdatalist = ['xmin', 'ymin', 'zmin', 'xmax', 'ymax', 'zmax',
'xstepsize', 'ystepsize', 'zstepsize', 'valuedim']
mdatadict = dict()
try:
with open(filename, 'r', encoding='utf-8') as ovffile:
f = ovffile.read()
lines = f.split('\n')
mdatalines = filter(lambda s: s.startswith('#'), lines)
datalines = np.loadtxt(filter(lambda s: not s.startswith('#'),
lines))
for line in mdatalines:
for mdatum in mdatalist:
if mdatum in line:
mdatadict[mdatum] = float(line.split()[-1])
break
except UnicodeDecodeError:
with open(filename, 'rb') as ovffile:
f = ovffile.read()
lines = f.split(b'\n')
mdatalines = filter(lambda s: s.startswith(bytes('#', 'utf-8')),
lines)
for line in mdatalines:
for mdatum in mdatalist:
if bytes(mdatum, 'utf-8') in line:
mdatadict[mdatum] = float(line.split()[-1])
break
header = b'# Begin: Data Binary '
data_start = f.find(header)
header = f[data_start:data_start + len(header) + 1]
data_start += len(b'# Begin: Data Binary 8')
data_end = f.find(b'# End: Data Binary ')
# ordered by length
newlines = [b'\n\r', b'\r\n', b'\n']
for nl in newlines:
if f.startswith(nl, data_start):
data_start += len(nl)
break
if b'4' in header:
formatstr = '@f'
checkvalue = 1234567.0
elif b'8' in header:
formatstr = '@d'
checkvalue = 123456789012345.0
listdata = list(struct.iter_unpack(formatstr,
f[data_start:data_end]))
datalines = np.array(listdata)
if datalines[0] != checkvalue:
# These two lines cannot be accessed via
# tests. Therefore, they are excluded from coverage.
msg = 'Binary Data cannot be read.' # pragma: no cover
raise AssertionError(msg) # pragma: no cover
datalines = datalines[1:] # check value removal
p1 = (mdatadict[key] for key in ['xmin', 'ymin', 'zmin'])
p2 = (mdatadict[key] for key in ['xmax', 'ymax', 'zmax'])
cell = (mdatadict[key] for key in ['xstepsize', 'ystepsize',
'zstepsize'])
dim = int(mdatadict['valuedim'])
mesh = df.Mesh(p1=p1, p2=p2, cell=cell)
field = df.Field(mesh, dim=dim, name=name)
r_tuple = tuple(reversed(field.mesh.n)) + (int(mdatadict['valuedim']),)
t_tuple = tuple(reversed(range(3))) + (3,)
field.array = datalines.reshape(r_tuple).transpose(t_tuple)
field.norm = norm # Normalise if norm is passed
return field
def mpl(self, figsize=None):
"""Plots a field plane using matplotlib.
Before the field can be plotted, it must be sliced with a
plane (e.g. `field.plane('z')`). Otherwise, ValueError is
raised. For vector fields, this method plots both `quiver`
(vector) and `imshow` (scalar) plots. The `imshow` plot
represents the value of the out-of-plane vector component and
the `quiver` plot is not coloured. On the other hand, only
`imshow` is plotted for scalar fields. Where the norm of the
field is zero, no vectors are shown and those `imshow` pixels
are not coloured. In order to use this function inside Jupyter
notebook `%matplotlib inline` must be activated after
`discretisedfield` is imported.
Parameters
----------
figsize : tuple, optional
Length-2 tuple passed to the `matplotlib.figure` function.
Raises
------
ValueError
If the field has not been sliced with a plane.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> field.plane(z=50, n=(5, 5)).mpl()
.. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`
"""
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using mpl. '
'For instance, field.plane(\'x\').mpl().')
raise ValueError(msg)
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111)
planeaxis = dfu.raxesdict[self.mesh.info['planeaxis']]
if self.dim > 1:
# Vector field has both quiver and imshow plots.
self.quiver(ax=ax, headwidth=5)
scfield = getattr(self, planeaxis)
coloredplot = scfield.imshow(ax=ax, norm_field=self.norm)
else:
# Scalar field has only imshow.
scfield = self
coloredplot = scfield.imshow(ax=ax, norm_field=None)
# Add colorbar to imshow plot.
cbar = self.colorbar(ax, coloredplot)
# Add labels.
ax.set_xlabel(dfu.raxesdict[self.mesh.info['axis1']])
ax.set_ylabel(dfu.raxesdict[self.mesh.info['axis2']])
if self.dim > 1:
cbar.ax.set_ylabel(planeaxis + ' component')
def imshow(self, ax, norm_field=None, **kwargs):
"""Plots a scalar field plane using `matplotlib.pyplot.imshow`.
Before the field can be plotted, it must be sliced with a
plane (e.g. `field.plane('y')`) and field must be of dimension
1 (scalar field). Otherwise, ValueError is raised. `imshow`
adds the plot to `matplotlib.axes.Axes` passed via `ax`
argument. If the scalar field plotted is extracted from a
vector field, which has coordinates where the norm of the
field is zero, the norm of that vector field can be passed
using `norm_field` argument, so that pixels at those
coordinates are not coloured. All other parameters accepted by
`matplotlib.pyplot.imshow` can be passed. In order to use this
function inside Jupyter notebook `%matplotlib inline` must be
activated after `discretisedfield` is imported.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to which the scalar plot will be added.
norm_field : discretisedfield.Field, optional
A (scalar) norm field used for determining whether certain
pixels should be coloured.
Returns
-------
matplotlib.image.AxesImage object
Raises
------
ValueError
If the field has not been sliced with a plane or its
dimension is not 1.
Example
-------
>>> import matplotlib.pyplot as plt
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=1, value=2)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> field.plane('y').imshow(ax=ax)
<matplotlib.image.AxesImage object at ...>
.. seealso:: :py:func:`~discretisedfield.Field.quiver`
"""
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using imshow. '
'For instance, field.plane(\'x\').imshow(ax=ax).')
raise ValueError(msg)
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.imshow(ax=ax) '
'or norm field.norm.imshow(ax=ax).')
raise ValueError(msg)
points, values = list(zip(*list(self)))
# If norm_field is passed, set values where norm=0 to np.nan,
# so that they are not plotted.
if norm_field is not None:
values = list(values) # make values mutable
for i, point in enumerate(points):
if norm_field(point) == 0:
values[i] = np.nan
# "Unpack" values inside arrays.
values = [v[0] if not np.isnan(v) else v for v in values]
else:
# "Unpack" values inside arrays.
values = list(zip(*values))
points = list(zip(*points))
extent = [self.mesh.pmin[self.mesh.info['axis1']],
self.mesh.pmax[self.mesh.info['axis1']],
self.mesh.pmin[self.mesh.info['axis2']],
self.mesh.pmax[self.mesh.info['axis2']]]
n = (self.mesh.n[self.mesh.info['axis2']],
self.mesh.n[self.mesh.info['axis1']])
imax = ax.imshow(np.array(values).reshape(n), origin='lower',
extent=extent, **kwargs)
return imax
def quiver(self, ax=None, color_field=None, **kwargs):
"""Plots a vector field plane using `matplotlib.pyplot.quiver`.
Before the field can be plotted, it must be sliced with a
plane (e.g. `field.plane('y')`) and field must be of dimension
3 (vector field). Otherwise, ValueError is raised. `quiver`
adds the plot to `matplotlib.axes.Axes` passed via `ax`
argument. If there are coordinates where the norm of the field
is zero, vectors are not plotted at those coordinates. By
default, plot is not coloured, but by passing a
`discretisedfield.Field` object of dimension 1 as
`color_field`, quiver plot will be coloured based on the
values from the field. All other parameters accepted by
`matplotlib.pyplot.quiver` can be passed. In order to use this
function inside Jupyter notebook `%matplotlib inline` must be
activated after `discretisedfield` is imported.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to which the quiver plot will be added.
color_field : discretisedfield.Field, optional
A (scalar) field used for determining the colour of the
quiver plot.
Returns
-------
matplotlib.quiver.Quiver object
Raises
------
ValueError
If the field has not been sliced with a plane or its
dimension is not 3.
Example
-------
>>> import matplotlib.pyplot as plt
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> field.plane(z=50).quiver(ax=ax, color_field=field.z)
<matplotlib.quiver.Quiver object at ...>
.. seealso:: :py:func:`~discretisedfield.Field.imshow`
"""
if not hasattr(self.mesh, 'info'):
msg = ('Only sliced field can be plotted using quiver. '
'For instance, field.plane(\'x\').quiver(ax=ax).')
raise ValueError(msg)
if self.dim != 3:
msg = 'Only three-dimensional (dim=3) fields can be plotted.'
raise ValueError(msg)
points, values = list(zip(*list(self)))
# Remove values where norm is 0
points, values = list(points), list(values) # make them mutable
points = [p for p, v in zip(points, values)
if not np.equal(v, 0).all()]
values = [v for v in values if not np.equal(v, 0).all()]
if color_field is not None:
colors = [color_field(p) for p in points]
colors = list(zip(*colors))
# "Unpack" values inside arrays.
points, values = list(zip(*points)), list(zip(*values))
# If all vectors point out of the plane (no in-plane components), fix the quiver scale.
if not any(values[self.mesh.info['axis1']] +
values[self.mesh.info['axis2']]):
kwargs['scale'] = 1
kwargs['pivot'] = 'mid' # arrow at the centre of the cell
if color_field is None:
# quiver plot is not coloured.
qvax = ax.quiver(points[self.mesh.info['axis1']],
points[self.mesh.info['axis2']],
values[self.mesh.info['axis1']],
values[self.mesh.info['axis2']],
**kwargs)
else:
# quiver plot is coloured.
qvax = ax.quiver(points[self.mesh.info['axis1']],
points[self.mesh.info['axis2']],
values[self.mesh.info['axis1']],
values[self.mesh.info['axis2']],
colors,
**kwargs)
return qvax
def colorbar(self, ax, coloredplot, cax=None, **kwargs):
"""Adds a colorbar to the axes using `matplotlib.pyplot.colorbar`.
Axes to which the colorbar should be added is passed via `ax`
argument. If the colorbar axes are made before the method is
called, they should be passed as `cax`. The plot to which the
colorbar should correspond to is passed via `coloredplot`. All
other parameters accepted by `matplotlib.pyplot.colorbar` can
be passed. In order to use this function inside Jupyter
notebook `%matplotlib inline` must be activated after
`discretisedfield` is imported.
Parameters
----------
ax : matplotlib.axes.Axes
Axes object to which the colorbar will be added.
coloredplot : matplotlib.quiver.Quiver, matplotlib.image.AxesImage
A plot to which the colorbar should correspond
cax : matplotlib.axes.Axes, optional
Colorbar axes.
Returns
-------
matplotlib.colorbar.Colorbar
Example
-------
>>> import matplotlib.pyplot as plt
>>> import discretisedfield as df
...
>>> p1 = (0, 0, 0)
>>> p2 = (100, 100, 100)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> coloredplot = field.plane(z=50).quiver(ax=ax, color_field=field.z)
>>> field.colorbar(ax=ax, coloredplot=coloredplot)
<matplotlib.colorbar.Colorbar object at ...>
"""
if cax is None:
divider = mpl_toolkits.axes_grid1.make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.1)
cbar = plt.colorbar(coloredplot, cax=cax, **kwargs)
return cbar
def k3d_nonzero(self, color=dfu.colormap[0], plot=None, **kwargs):
"""Plots the voxels where the value of a scalar field is nonzero.
All mesh cells where the value of the field is not zero will
be marked using the same color. Only scalar fields can be
plotted. Otherwise, ValueError is raised. Different colour of
voxels can be passed in the RGB format using `color`
parameter. This function is often used to look at the defined
sample in the finite difference mesh, by inspecting its norm
(`field.norm.k3d_nonzero`). If `plot` is passed as a
`k3d.plot.Plot`, plot is added to it. Otherwise, a new k3d
plot is created. All arguments allowed in `k3d.voxels()` can
be passed. This function is to be called in Jupyter notebook.
Parameters
----------
color : int/hex, optional
Voxel color in hexadecimal format.
plot : k3d.plot.Plot, optional
If this argument is passed, plot is added to
it. Otherwise, a new k3d plot is created.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> def normfun(pos):
... x, y, z = pos
... if x**2 + y**2 < 30**2:
... return 1
... else:
... return 0
>>> field.norm = normfun
>>> field.norm.k3d_nonzero()
Plot(...)
.. seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`
"""
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.k3d_nonzero() '
'or norm field.norm.k3d_nonzero().')
raise ValueError(msg)
plot_array = np.copy(self.array) # make a deep copy
plot_array = np.squeeze(plot_array) # remove an empty dimension
plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)
plot_array[plot_array != 0] = 1 # all cells have the same colour
# In the case of nano-sized samples, fix the order of
# magnitude of the plot extent to avoid freezing the k3d plot.
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
pmin = np.divide(self.mesh.pmin, 1e-9)
pmax = np.divide(self.mesh.pmax, 1e-9)
else:
pmin = self.mesh.pmin
pmax = self.mesh.pmax
dfu.voxels(plot_array, pmin, pmax, colormap=color,
plot=plot, **kwargs)
def k3d_voxels(self, norm_field=None, plot=None, **kwargs):
"""Plots the scalar field as a coloured `k3d.voxels()` plot.
At all mesh cells, a voxel will be plotted and coloured
according to its value. If the scalar field plotted is
extracted from a vector field, which has coordinates where the
norm of the field is zero, the norm of that vector field can
be passed using `norm_field` argument, so that voxels at those
coordinates are not shown. Only scalar fields can be
plotted. Otherwise, ValueError is raised. If `plot` is passed
as a `k3d.plot.Plot`, plot is added to it. Otherwise, a new
k3d plot is created. All arguments allowed in `k3d.voxels()`
can be passed. This function is to be called in Jupyter
notebook.
Parameters
----------
norm_field : discretisedfield.Field, optional
A (scalar) norm field used for determining whether certain
voxels should be plotted.
plot : k3d.plot.Plot, optional
If this argument is passed, plot is added to
it. Otherwise, a new k3d plot is created.
Example
-------
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> def normfun(pos):
... x, y, z = pos
... if x**2 + y**2 < 30**2:
... return 1
... else:
... return 0
>>> field.norm = normfun
>>> field.x.k3d_voxels(norm_field=field.norm)
Plot(...)
.. seealso:: :py:func:`~discretisedfield.Field.k3d_vectors`
"""
if self.dim > 1:
msg = ('Only scalar (dim=1) fields can be plotted. Consider '
'plotting one component, e.g. field.x.k3d_voxels() '
'or norm field.norm.k3d_voxels().')
raise ValueError(msg)
plot_array = np.copy(self.array) # make a deep copy
plot_array = plot_array[..., 0] # remove an empty dimension
plot_array -= plot_array.min()
# In the case of uniform fields, division by zero can be
# encountered.
if plot_array.max() != 0:
plot_array /= plot_array.max()
plot_array *= 254
plot_array += 1
plot_array = plot_array.round()
plot_array = plot_array.astype(int)
if norm_field is not None:
for index in self.mesh.indices:
if norm_field(self.mesh.index2point(index)) == 0:
plot_array[index] = 0
plot_array = np.swapaxes(plot_array, 0, 2) # k3d: arrays are (z, y, x)
cmap = matplotlib.cm.get_cmap('viridis', 256)
colormap = [dfu.num2hexcolor(i, cmap) for i in range(cmap.N)]
# In the case of nano-sized samples, fix the order of
# magnitude of the plot extent to avoid freezing the k3d plot.
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
pmin = np.divide(self.mesh.pmin, 1e-9)
pmax = np.divide(self.mesh.pmax, 1e-9)
else:
pmin = self.mesh.pmin
pmax = self.mesh.pmax
dfu.voxels(plot_array, pmin, pmax, colormap=colormap,
plot=plot, **kwargs)
def k3d_vectors(self, color_field=None, points=True, plot=None, **kwargs):
"""Plots the vector field as a `k3d.vectors()` plot.
At all mesh cells, a vector will be plotted if its norm is not
zero. Vectors can be coloured according to the values of the
scalar field passed as `color_field`. Only vector fields can
be plotted. Otherwise, ValueError is raised. Points at the
discretisation cell centres can be added by setting
`points=True`. If `plot` is passed as a `k3d.plot.Plot`, plot
is added to it. Otherwise, a new k3d plot is created. All
arguments allowed in `k3d.vectors()` can be passed. This
function is to be called in Jupyter notebook.
Parameters
----------
color_field : discretisedfield.Field, optional
A (scalar) field used for determining the colours of
vectors.
points : bool, optional
If `True`, points will be added to the discretisation cell
centres.
plot : k3d.plot.Plot, optional
If this argument is passed, plot is added to
it. Otherwise, a new k3d plot is created.
Example
-------
1. Plotting an entire vector field.
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> field.k3d_vectors(color_field=field.x)
Plot(...)
2. Plotting the slice of a vector field.
>>> import discretisedfield as df
...
>>> p1 = (-50, -50, -50)
>>> p2 = (50, 50, 50)
>>> n = (10, 10, 10)
>>> mesh = df.Mesh(p1=p1, p2=p2, n=n)
>>> field = df.Field(mesh, dim=3, value=(1, 2, 0))
>>> field.plane('x').k3d_vectors(color_field=field.x)
Plot(...)
.. seealso:: :py:func:`~discretisedfield.Field.k3d_voxels`
"""
if self.dim != 3:
msg = 'Only three-dimensional (dim=3) fields can be plotted.'
raise ValueError(msg)
coordinates, vectors, color_values = [], [], []
norm = self.norm # assigned to be computed only once
for coord, value in self:
if norm(coord) > 0:
coordinates.append(coord)
vectors.append(value)
if color_field is not None:
color_values.append(color_field(coord)[0])
coordinates, vectors = np.array(coordinates), np.array(vectors)
# In the case of nano-sized samples, fix the order of
# magnitude of the coordinates to avoid freezing the k3d plot.
if np.any(np.divide(self.mesh.cell, 1e-9) < 1e3):
coordinates /= 1e-9
cell = np.divide(self.mesh.cell, 1e-9)
else:
cell = self.mesh.cell
# Scale the vectors to correspond to the size of cells.
vectors /= vectors.max()
vectors *= 0.8*np.array(cell)
# Middle of the arrow is at the cell centre.
coordinates -= 0.5 * vectors
if color_field is not None:
color_values = np.array(color_values)
color_values -= color_values.min()
# In the case of uniform fields, division by zero can be
# encountered.
if color_values.max() != 0:
color_values /= color_values.max()
color_values *= 256
color_values = color_values.round()
color_values = color_values.astype(int)
cmap = matplotlib.cm.get_cmap('viridis', 256)
colors = []
for c in color_values:
color = dfu.num2hexcolor(c, cmap)
colors.append((color, color))
else:
colors = []
plot = dfu.vectors(coordinates, vectors, colors=colors,
plot=plot, **kwargs)
if points:
dfu.points(coordinates + 0.5 * vectors, plot=plot)
| 35.333777 | 79 | 0.527003 |
f7190ba74292947809c2128ff0aaecac93157a21 | 815 | py | Python | src/configs/model_id_opts.py | rgalhama/public_ICCM2021 | 6a528a26c649da0843b7acbc785aa99b80d29a74 | [
"MIT"
] | null | null | null | src/configs/model_id_opts.py | rgalhama/public_ICCM2021 | 6a528a26c649da0843b7acbc785aa99b80d29a74 | [
"MIT"
] | null | null | null | src/configs/model_id_opts.py | rgalhama/public_ICCM2021 | 6a528a26c649da0843b7acbc785aa99b80d29a74 | [
"MIT"
] | null | null | null | """
Author : Raquel G. Alhama
Desc:
"""
def strid_to_opts(strid):
"""
Given model id as string, extract parameter dictionary.
Reverse of config_loader.opts2strid
:param strid:
:return:
"""
raise NotImplementedError
#Method not finished
parts = strid.split("_")
param_keys = "thr,win,dim,neg,dim,size,eig,neg,dyn,cds".split(",")  # finish
d={}
for i,part in enumerate(parts):
if part == 'post':
pass
elif part in param_keys:
if i + 1 < len(parts) and parts[i + 1] not in param_keys:
k=part
v=parts[i+1]
d[k]=v
else: #key without value
k=part
v=1
d[k]=v
else: #value
pass
return d
# for p in parts: | 22.638889 | 76 | 0.516564 |
f71916c16a3387a714ba74da62f20782e4f9fe3d | 7,539 | py | Python | core/views.py | ICFL-UP/Yrden | 88c421f1b391e9a6943455b05b8f397e9023187b | [
"MIT"
] | null | null | null | core/views.py | ICFL-UP/Yrden | 88c421f1b391e9a6943455b05b8f397e9023187b | [
"MIT"
] | 6 | 2022-02-16T06:08:43.000Z | 2022-02-16T06:08:55.000Z | core/views.py | ICFL-UP/Yrden | 88c421f1b391e9a6943455b05b8f397e9023187b | [
"MIT"
] | null | null | null | import logging
import os
import json
import shutil
import threading
from typing import Any, List
from django.contrib.auth import login
from django.forms.models import BaseModelForm
from django.http.request import HttpRequest
from django.http.response import HttpResponse
from django.views.generic import ListView, DetailView, CreateView
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse_lazy
from django.views.generic.edit import DeleteView
from django.shortcuts import redirect, render
from django.urls import reverse
from django.utils import timezone
from datetime import datetime
from django.contrib.auth.mixins import LoginRequiredMixin
from core.utils import build_zip_json, create_venv, extract_zip, get_python_choices, write_log
from core.models import Plugin, PluginRun
from core.forms import NewUserForm, PluginFormSet, PluginSourceForm
from core.enums.log_type_enum import LogType
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-9s) %(message)s',)
def register_request(request: HttpRequest):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
return redirect(reverse("core:index"))
form = NewUserForm()
return render(request=request, template_name="registration/register.html", context={"register_form":form})
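# A minimal `core/urls.py` wiring for these views could look like the sketch below.
# Only the "index" route name is taken from this module (it is used in reverse("core:index"));
# the remaining paths and route names are illustrative assumptions, not taken from this repo:
#   from django.urls import path
#   from . import views
#   app_name = "core"
#   urlpatterns = [
#       path("", views.PluginIndexView.as_view(), name="index"),
#       path("plugin/<int:pk>/", views.PluginDetailView.as_view(), name="plugin_detail"),
#       path("plugin/create/", views.PluginCreateView.as_view(), name="plugin_create"),
#       path("plugin/<int:pk>/delete/", views.PluginDeleteView.as_view(), name="plugin_delete"),
#       path("register/", views.register_request, name="register"),
#   ]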
class PluginIndexView(LoginRequiredMixin, ListView):
model = Plugin
template_name = 'core/index.html'
context_object_name = 'plugins'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginIndexView, self).get_context_data(**kwargs)
plugins = self.get_queryset()
page = self.request.GET.get('page')
paginator = Paginator(plugins, self.paginate_by)
try:
plugins = paginator.page(page)
except PageNotAnInteger:
plugins = paginator.page(1)
except EmptyPage:
plugins = paginator.page(paginator.num_pages)
context['plugins'] = plugins
return context
class PluginDetailView(LoginRequiredMixin, DetailView):
model = Plugin
template_name = 'core/plugin_detail.html'
context_object_name = 'plugin'
paginate_by = 5
def get_context_data(self, **kwargs):
context = super(PluginDetailView, self).get_context_data(**kwargs)
plugin_runs = PluginRun.objects.filter(plugin=self.kwargs['pk'])
page = self.request.GET.get('page')
paginator = Paginator(plugin_runs, self.paginate_by)
try:
plugin_runs = paginator.page(page)
except PageNotAnInteger:
plugin_runs = paginator.page(1)
except EmptyPage:
plugin_runs = paginator.page(paginator.num_pages)
context['plugin_runs'] = plugin_runs
return context
class PluginCreateView(LoginRequiredMixin, CreateView):
form_class = PluginSourceForm
template_name = 'core/plugin_create_form.html'
success_url = reverse_lazy('core:index')
def get_context_data(self, **kwargs):
context = super(PluginCreateView, self).get_context_data(**kwargs)
context['plugin_formset'] = PluginFormSet()
return context
def post(self, request, *args, **kwargs):
self.object = None
form_class = self.get_form_class()
form = self.get_form(form_class)
plugin_formset = PluginFormSet(self.request.POST)
if form.is_valid() and plugin_formset.is_valid():
return self.form_valid(form, plugin_formset, request.user)
else:
return self.form_invalid(form, plugin_formset)
def form_valid(self, form: BaseModelForm, plugin_formset: PluginFormSet, user):
# save PluginSource
self.object = form.save(commit=False)
self.object.source_dest = form.cleaned_data['source_dest']
self.object.source_hash = form.cleaned_data['source_hash']
self.object.upload_time = form.cleaned_data['upload_time']
self.object.upload_user = user
self.object.save()
build_hash_thread = threading.Thread(
target=build_zip_json, args=(form.cleaned_data['plugin_zip_file'].file, self.object))
build_hash_thread.start()
log_json: dict = {
'log_datetime': datetime.timestamp(timezone.now()),
'source_dest': self.object.source_dest,
'source_hash': self.object.source_hash,
'upload_time': self.object.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
'upload_user_username': self.object.upload_user.username,
'upload_user_email': self.object.upload_user.email,
}
write_log(LogType.CREATE, self.object, log_json)
# save Plugin
plugin: List[Plugin] = plugin_formset.save(commit=False)
plugin[0].plugin_source = self.object
plugin[0].python_version = plugin_formset.cleaned_data[0]['python_version']
plugin[0].plugin_dest = 'core' + os.sep + \
'plugin' + os.sep + self.object.source_hash + '_' + \
str(datetime.timestamp(self.object.upload_time))
extract_zip_thread = threading.Thread(target=extract_zip, args=(
form.cleaned_data['plugin_zip_file'], plugin[0].plugin_dest))
extract_zip_thread.start()
plugin[0].save()
extract_zip_thread.join()
venv_thread = threading.Thread(target=create_venv, args=(plugin[0], ))
venv_thread.start()
return redirect(reverse("core:index"))
def form_invalid(self, form, plugin_formset):
return self.render_to_response(
self.get_context_data(form=form,
product_meta_formset=plugin_formset
)
)
class PluginDeleteView(LoginRequiredMixin, DeleteView):
model = Plugin
template_name = 'core/plugin_delete.html'
success_url = reverse_lazy('core:index')
def delete(self, request: HttpRequest, *args: str, **kwargs: Any) -> HttpResponse:
object: Plugin = self.get_object()
user = request.user
source_dest = object.plugin_source.source_dest
shutil.rmtree(object.plugin_dest)
deleted_time = timezone.now()
deleted_dest = 'core' + os.sep + 'source' + os.sep + 'deleted_' + object.plugin_source.source_hash + \
'_' + str(datetime.timestamp(object.plugin_source.upload_time))
log_json: dict = {
'log_datetime': datetime.timestamp(deleted_time),
'source_dest': object.plugin_source.source_dest,
'source_hash': object.plugin_source.source_hash,
'upload_time': object.plugin_source.upload_time.strftime("%m/%d/%Y, %H:%M:%S"),
'upload_user_username': object.plugin_source.upload_user.username,
'upload_user_email': object.plugin_source.upload_user.email,
'source_file_hash': json.loads(object.plugin_source.source_file_hash),
'username': user.username,
'user_email': user.email,
'deleted_dest': deleted_dest
}
write_log(LogType.DELETE, object.plugin_source, log_json)
shutil.move(source_dest, deleted_dest)
object.plugin_source.source_hash = 'deleted_' + object.plugin_source.source_hash
object.plugin_source.source_dest = deleted_dest
object.plugin_source.save()
return super().delete(request, *args, **kwargs)
| 38.464286 | 110 | 0.67635 |
f71918615f3a215dc0bc915794b798facde5f6a8 | 22,397 | py | Python | qnarre/models/ibert_quant_modules.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/models/ibert_quant_modules.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | qnarre/models/ibert_quant_modules.py | quantapix/qnarre.com | f51d5945c20ef8182c4aa11f1b407d064c190c70 | [
"MIT"
] | null | null | null | import decimal
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F  # F.embedding / F.linear / F.softmax are used below
from torch.autograd import Function
from ...utils import logging
logger = logging.get_logger(__name__)
class QuantEmbedding(qc.Module):
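# Quantized drop-in for an embedding layer. Usage sketch (illustrative sizes; `qc.Module`
# is assumed to be qnarre's nn.Module-compatible base class):
#   emb = QuantEmbedding(30522, 768, padding_idx=0, weight_bit=8, quant_mode=True)
#   ids = torch.randint(0, 30522, (2, 16))
#   out, out_scaling_factor = emb(ids)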
def __init__(
self,
num_embeddings,
embedding_dim,
padding_idx=None,
max_norm=None,
norm_type=2.0,
scale_grad_by_freq=False,
sparse=False,
_weight=None,
weight_bit=8,
momentum=0.95,
quant_mode=False,
):
super().__init__()
self.num_ = num_embeddings
self.dim = embedding_dim
self.padding_idx = padding_idx
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.weight = nn.Parameter(torch.zeros([num_embeddings, embedding_dim]))
self.register_buffer("weight_scaling_factor", torch.zeros(1))
self.register_buffer("weight_integer", torch.zeros_like(self.weight))
self.weight_bit = weight_bit
self.momentum = momentum
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def forward(self, x, positions=None, incremental_state=None):
if not self.quant_mode:
return (
F.embedding(
x,
self.weight,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
),
None,
)
w = self.weight
w_transform = w.data.detach()
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.weight_scaling_factor = symmetric_linear_quantization_params(
self.weight_bit, w_min, w_max, False
)
self.weight_integer = self.weight_function(
self.weight, self.weight_bit, self.percentile_mode, self.weight_scaling_factor
)
emb_int = F.embedding(
x,
self.weight_integer,
self.padding_idx,
self.max_norm,
self.norm_type,
self.scale_grad_by_freq,
self.sparse,
)
return emb_int * self.weight_scaling_factor, self.weight_scaling_factor
class QuantAct(qc.Module):
def __init__(
self,
activation_bit,
act_range_momentum=0.95,
per_channel=False,
channel_len=None,
quant_mode=False,
):
super().__init__()
self.activation_bit = activation_bit
self.act_range_momentum = act_range_momentum
self.quant_mode = quant_mode
self.per_channel = per_channel
self.percentile = False
self.act_function = SymmetricQuantFunction.apply
if not self.per_channel:
self.register_buffer("x_min", torch.zeros(1))
self.register_buffer("x_max", torch.zeros(1))
self.register_buffer("act_scaling_factor", torch.zeros(1))
self.x_min -= 1e-5
self.x_max += 1e-5
else:
raise NotImplementedError("per-channel mode is not currently supported for activation.")
def __repr__(self):
return (
f"{self.__class__.__name__}(activation_bit={self.activation_bit}, "
f"quant_mode: {self.quant_mode}, Act_min: {self.x_min.item():.2f}, "
f"Act_max: {self.x_max.item():.2f})"
)
def forward(
self,
x,
pre_act_scaling_factor=None,
identity=None,
identity_scaling_factor=None,
specified_min=None,
specified_max=None,
):
x_act = x if identity is None else identity + x
# collect running stats if training
if self.training:
assert not self.percentile, "percentile mode is not currently supported for activation."
assert (
not self.per_channel
), "per-channel mode is not currently supported for activation."
x_min = x_act.data.min()
x_max = x_act.data.max()
assert (
x_max.isnan().sum() == 0 and x_min.isnan().sum() == 0
), "NaN detected when computing min/max of the activation"
# Initialization
if self.x_min.min() > -1.1e-5 and self.x_max.max() < 1.1e-5:
self.x_min = self.x_min + x_min
self.x_max = self.x_max + x_max
# exponential moving average (EMA)
# use momentum to prevent the quantized values change greatly every iteration
elif self.act_range_momentum == -1:
self.x_min = torch.min(self.x_min, x_min)
self.x_max = torch.max(self.x_max, x_max)
else:
self.x_min = self.x_min * self.act_range_momentum + x_min * (
1 - self.act_range_momentum
)
self.x_max = self.x_max * self.act_range_momentum + x_max * (
1 - self.act_range_momentum
)
if not self.quant_mode:
return x_act, None
x_min = self.x_min if specified_min is None else specified_min
x_max = self.x_max if specified_max is None else specified_max
self.act_scaling_factor = symmetric_linear_quantization_params(
self.activation_bit, x_min, x_max, per_channel=self.per_channel
)
if pre_act_scaling_factor is None:
# this is for the input quantization
quant_act_int = self.act_function(
x, self.activation_bit, self.percentile, self.act_scaling_factor
)
else:
quant_act_int = FixedPointMul.apply(
x,
pre_act_scaling_factor,
self.activation_bit,
self.act_scaling_factor,
identity,
identity_scaling_factor,
)
correct_output_scale = self.act_scaling_factor.view(-1)
return quant_act_int * correct_output_scale, self.act_scaling_factor
class QuantLinear(qc.Module):
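# Quantized drop-in for nn.Linear. Usage sketch (illustrative sizes; when quant_mode=True,
# forward() asserts that the previous activation's scaling factor is a 1-element tensor):
#   fc = QuantLinear(768, 768, bias=True, weight_bit=8, per_channel=True, quant_mode=True)
#   x_q, act_scale = QuantAct(8, quant_mode=True)(torch.randn(2, 16, 768))
#   y, out_scale = fc(x_q, prev_act_scaling_factor=act_scale)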
def __init__(
self,
in_features,
out_features,
bias=True,
weight_bit=8,
bias_bit=32,
per_channel=False,
quant_mode=False,
):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = nn.Parameter(torch.zeros([out_features, in_features]))
self.register_buffer("weight_integer", torch.zeros_like(self.weight))
self.register_buffer("fc_scaling_factor", torch.zeros(self.out_features))
if bias:
self.bias = nn.Parameter(torch.zeros(out_features))
self.register_buffer("bias_integer", torch.zeros_like(self.bias))
self.weight_bit = weight_bit
self.quant_mode = quant_mode
self.per_channel = per_channel
self.bias_bit = bias_bit
self.quant_mode = quant_mode
self.percentile_mode = False
self.weight_function = SymmetricQuantFunction.apply
def __repr__(self):
s = super().__repr__()
s = f"({s} weight_bit={self.weight_bit}, quant_mode={self.quant_mode})"
return s
def forward(self, x, prev_act_scaling_factor=None):
if not self.quant_mode:
return F.linear(x, weight=self.weight, bias=self.bias), None
# assert that prev_act_scaling_factor is a scalar tensor
assert prev_act_scaling_factor is not None and prev_act_scaling_factor.shape == (1,), (
"Input activation to the QuantLinear layer should be globally (non-channel-wise) quantized. "
"Please add a QuantAct layer with `per_channel = True` before this QuantAct layer"
)
w = self.weight
w_transform = w.data.detach()
if self.per_channel:
w_min, _ = torch.min(w_transform, dim=1, out=None)
w_max, _ = torch.max(w_transform, dim=1, out=None)
else:
w_min = w_transform.min().expand(1)
w_max = w_transform.max().expand(1)
self.fc_scaling_factor = symmetric_linear_quantization_params(
self.weight_bit, w_min, w_max, self.per_channel
)
self.weight_integer = self.weight_function(
self.weight, self.weight_bit, self.percentile_mode, self.fc_scaling_factor
)
bias_scaling_factor = self.fc_scaling_factor * prev_act_scaling_factor
if self.bias is not None:
self.bias_integer = self.weight_function(
self.bias, self.bias_bit, False, bias_scaling_factor
)
prev_act_scaling_factor = prev_act_scaling_factor.view(1, -1)
x_int = x / prev_act_scaling_factor
return (
F.linear(x_int, weight=self.weight_integer, bias=self.bias_integer)
* bias_scaling_factor,
bias_scaling_factor,
)
class IntGELU(qc.Module):
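# Integer-only GELU: GELU(x) = x * 0.5 * (1 + erf(x / sqrt(2))), with erf approximated by
# the polynomial sign(x) * (a * (min(|x|, -b) + b)**2 + c) in int_erf() below, where
# (a, b, c) are stored in self.coeff. Usage sketch:
#   gelu = IntGELU(quant_mode=True)
#   y, y_scale = gelu(x_q, scaling_factor=x_scale)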
def __init__(self, quant_mode=True, force_dequant="none"):
super().__init__()
self.quant_mode = quant_mode
if force_dequant in ["nonlinear", "gelu"]:
logger.info("Force dequantize gelu")
self.quant_mode = False
if not self.quant_mode:
self.activation_fn = nn.GELU()
self.k = 1.4142
self.const = 14 # dummy integer constant
self.coeff = [-0.2888, -1.769, 1] # a(x+b)**2 + c
self.coeff[2] /= self.coeff[0]
def int_erf(self, x_int, scaling_factor):
b_int = torch.floor(self.coeff[1] / scaling_factor)
c_int = torch.floor(self.coeff[2] / scaling_factor**2)
sign = torch.sign(x_int)
abs_int = torch.min(torch.abs(x_int), -b_int)
y_int = sign * ((abs_int + b_int) ** 2 + c_int)
scaling_factor = scaling_factor**2 * self.coeff[0]
# avoid overflow
y_int = floor_ste.apply(y_int / 2**self.const)
scaling_factor = scaling_factor * 2**self.const
return y_int, scaling_factor
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
return self.activation_fn(x), None
x_int = x / scaling_factor
sigmoid_int, sigmoid_scaling_factor = self.int_erf(x_int, scaling_factor / self.k)
shift_int = 1.0 // sigmoid_scaling_factor
x_int = x_int * (sigmoid_int + shift_int)
scaling_factor = scaling_factor * sigmoid_scaling_factor / 2
return x_int * scaling_factor, scaling_factor
class IntSoftmax(qc.Module):
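# Integer-only softmax: scores are shifted by their row maximum, then exp is computed via
# the decomposition x = q * (-ln 2) + r, so exp(x) = 2**(-q) * exp(r), with exp(r)
# approximated by the second-order polynomial in int_polynomial(). Usage sketch:
#   softmax = IntSoftmax(output_bit=8, quant_mode=True)
#   probs, probs_scale = softmax(scores_q, scaling_factor=scores_scale)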
def __init__(self, output_bit, quant_mode=False, force_dequant="none"):
super().__init__()
self.output_bit = output_bit
self.max_bit = 32
self.quant_mode = quant_mode
if force_dequant in ["nonlinear", "softmax"]:
logger.info("Force dequantize softmax")
self.quant_mode = False
self.act = QuantAct(16, quant_mode=self.quant_mode)
self.x0 = -0.6931 # -ln2
self.const = 30 # dummy integer constant
self.coef = [0.35815147, 0.96963238, 1.0] # ax**2 + bx + c
self.coef[1] /= self.coef[0]
self.coef[2] /= self.coef[0]
def int_polynomial(self, x_int, scaling_factor):
with torch.no_grad():
b_int = torch.floor(self.coef[1] / scaling_factor)
c_int = torch.floor(self.coef[2] / scaling_factor**2)
z = (x_int + b_int) * x_int + c_int
scaling_factor = self.coef[0] * scaling_factor**2
return z, scaling_factor
def int_exp(self, x_int, scaling_factor):
with torch.no_grad():
x0_int = torch.floor(self.x0 / scaling_factor)
x_int = torch.max(x_int, self.const * x0_int)
q = floor_ste.apply(x_int / x0_int)
r = x_int - x0_int * q
exp_int, exp_scaling_factor = self.int_polynomial(r, scaling_factor)
exp_int = torch.clamp(floor_ste.apply(exp_int * 2 ** (self.const - q)), min=0)
scaling_factor = exp_scaling_factor / 2**self.const
return exp_int, scaling_factor
def forward(self, x, scaling_factor):
if not self.quant_mode:
return F.softmax(x, dim=-1), None
x_int = x / scaling_factor
x_int_max, _ = x_int.max(dim=-1, keepdim=True)
x_int = x_int - x_int_max
exp_int, exp_scaling_factor = self.int_exp(x_int, scaling_factor)
# Avoid overflow
exp, exp_scaling_factor = self.act(exp_int, exp_scaling_factor)
exp_int = exp / exp_scaling_factor
exp_int_sum = exp_int.sum(dim=-1, keepdim=True)
factor = floor_ste.apply(2**self.max_bit / exp_int_sum)
exp_int = floor_ste.apply(exp_int * factor / 2 ** (self.max_bit - self.output_bit))
scaling_factor = 1 / 2**self.output_bit
return exp_int * scaling_factor, scaling_factor
class IntLayerNorm(qc.Module):
def __init__(self, normalized_shape, eps, output_bit=8, quant_mode=False, force_dequant="none"):
super().__init__()
self.normalized_shape = normalized_shape
self.eps = eps
self.weight = nn.Parameter(torch.zeros(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.quant_mode = quant_mode
if force_dequant in ["nonlinear", "layernorm"]:
logger.info("Force dequantize layernorm")
self.quant_mode = False
self.register_buffer("shift", torch.zeros(1))
self.output_bit = output_bit
self.max_bit = 32
self.dim_sqrt = None
self.activation = QuantAct(self.output_bit, quant_mode=self.quant_mode)
def set_shift(self, y_int):
with torch.no_grad():
y_sq_int = y_int**2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
shift = (torch.log2(torch.sqrt(var_int / 2**self.max_bit)).ceil()).max()
shift_old = self.shift
self.shift = torch.max(self.shift, shift)
logger.info(f"Dynamic shift adjustment: {int(shift_old)} to {int(self.shift)}")
def overflow_fallback(self, y_int):
self.set_shift(y_int) # adjusts `self.shift`
y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
y_sq_int = y_int_shifted**2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
return var_int
def forward(self, x, scaling_factor=None):
if not self.quant_mode:
mean = x.mean(axis=2, keepdim=True)
y = x - mean
var = torch.mean(y**2, axis=2, keepdim=True)
x = y / torch.sqrt(self.eps + var)
x = x * self.weight + self.bias
return x, None
# compute sqrt of the feature dimension if it is the first run
if self.dim_sqrt is None:
n = torch.tensor(x.shape[2], dtype=torch.float)
self.dim_sqrt = torch.sqrt(n).to(x.device)
# Normalization: computes mean and variance(std)
x_int = x / scaling_factor
mean_int = round_ste.apply(x_int.mean(axis=2, keepdim=True))
y_int = x_int - mean_int
y_int_shifted = floor_ste.apply(y_int / 2**self.shift)
y_sq_int = y_int_shifted**2
var_int = torch.sum(y_sq_int, axis=2, keepdim=True)
# overflow handling in training time
if self.training:
# if overflow is detected
if var_int.max() >= 2**self.max_bit:
var_int = self.overflow_fallback(y_int)
assert var_int.max() < 2**self.max_bit + 0.1, (
"Error detected in overflow handling: "
"`var_int` exceeds `self.max_bit` (the maximum possible bit width)"
)
# To be replaced with integer-sqrt kernel that produces the same output
std_int = floor_ste.apply(torch.sqrt(var_int)) * 2**self.shift
factor = floor_ste.apply(2**31 / std_int)
y_int = floor_ste.apply(y_int * factor / 2)
scaling_factor = self.dim_sqrt / 2**30
# scaling and shifting
bias = self.bias.data.detach() / (self.weight.data.detach())
bias_int = floor_ste.apply(bias / scaling_factor)
y_int = y_int + bias_int
scaling_factor = scaling_factor * self.weight
x = y_int * scaling_factor
return x, scaling_factor
def get_percentile_min_max(input, lower_percentile, upper_percentile, output_tensor=False):
input_length = input.shape[0]
lower_index = round(input_length * (1 - lower_percentile * 0.01))
upper_index = round(input_length * upper_percentile * 0.01)
upper_bound = torch.kthvalue(input, k=upper_index).values
if lower_percentile == 0:
lower_bound = upper_bound * 0
# lower_index += 1
else:
lower_bound = -torch.kthvalue(-input, k=lower_index).values
if not output_tensor:
lower_bound = lower_bound.item()
upper_bound = upper_bound.item()
return lower_bound, upper_bound
def linear_quantize(input, scale, zero_point, inplace=False):
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
else:
scale = scale.view(-1)
zero_point = zero_point.view(-1)
# quantized = float / scale + zero_point
if inplace:
input.mul_(1.0 / scale).add_(zero_point).round_()
return input
return torch.round(1.0 / scale * input + zero_point)
def symmetric_linear_quantization_params(
num_bits, saturation_min, saturation_max, per_channel=False
):
with torch.no_grad():
n = 2 ** (num_bits - 1) - 1
if per_channel:
scale, _ = torch.max(
torch.stack([saturation_min.abs(), saturation_max.abs()], dim=1), dim=1
)
scale = torch.clamp(scale, min=1e-8) / n
else:
scale = max(saturation_min.abs(), saturation_max.abs())
scale = torch.clamp(scale, min=1e-8) / n
return scale
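# Worked example: for num_bits=8, saturation_min=-1.0 and saturation_max=2.0,
# n = 2**7 - 1 = 127 and scale = 2.0 / 127 ~= 0.01575, so round(x / scale) fits the
# signed 8-bit range that SymmetricQuantFunction below clamps to ([-n, n - 1]).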
class SymmetricQuantFunction(Function):
@staticmethod
def forward(ctx, x, k, percentile_mode, scale):
zero_point = torch.tensor(0.0).to(scale.device)
n = 2 ** (k - 1) - 1
new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
ctx.scale = scale
return new_quant_x
@staticmethod
def backward(ctx, grad_output):
scale = ctx.scale
if len(grad_output.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(grad_output.shape) == 2:
scale = scale.view(-1, 1)
else:
scale = scale.view(-1)
return grad_output.clone() / scale, None, None, None, None
class floor_ste(Function):
@staticmethod
def forward(ctx, x):
return torch.floor(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
class round_ste(Function):
@staticmethod
def forward(ctx, x):
return torch.round(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
def batch_frexp(inputs, max_bit=31):
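# Splits each float into an integer mantissa m and exponent e such that x ~= m / 2**e,
# with m scaled by 2**max_bit. Worked example: np.frexp(0.75) returns (0.75, 0), so
# m = round(0.75 * 2**31) = 1610612736, e = 31 - 0 = 31, and 1610612736 / 2**31 == 0.75.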
shape_of_input = inputs.size()
# trans the input to be a 1-d tensor
inputs = inputs.view(-1)
output_m, output_e = np.frexp(inputs.cpu().numpy())
tmp_m = []
for m in output_m:
int_m_shifted = int(
decimal.Decimal(m * (2**max_bit)).quantize(
decimal.Decimal("1"), rounding=decimal.ROUND_HALF_UP
)
)
tmp_m.append(int_m_shifted)
output_m = np.array(tmp_m)
output_e = float(max_bit) - output_e
return (
torch.from_numpy(output_m).to(inputs.device).view(shape_of_input),
torch.from_numpy(output_e).to(inputs.device).view(shape_of_input),
)
class FixedPointMul(Function):
@staticmethod
def forward(
ctx,
pre_act,
pre_act_scaling_factor,
bit_num,
z_scaling_factor,
identity=None,
identity_scaling_factor=None,
):
if len(pre_act_scaling_factor.shape) == 3:
reshape = lambda x: x # noqa: E731
else:
reshape = lambda x: x.view(1, 1, -1) # noqa: E731
ctx.identity = identity
n = 2 ** (bit_num - 1) - 1
with torch.no_grad():
pre_act_scaling_factor = reshape(pre_act_scaling_factor)
if identity is not None:
identity_scaling_factor = reshape(identity_scaling_factor)
ctx.z_scaling_factor = z_scaling_factor
z_int = torch.round(pre_act / pre_act_scaling_factor)
_A = pre_act_scaling_factor.type(torch.double)
_B = (z_scaling_factor.type(torch.float)).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m, e = batch_frexp(new_scale)
output = z_int.type(torch.double) * m.type(torch.double)
output = torch.round(output / (2.0**e))
if identity is not None:
# needs addition of identity activation
wx_int = torch.round(identity / identity_scaling_factor)
_A = identity_scaling_factor.type(torch.double)
_B = (z_scaling_factor.type(torch.float)).type(torch.double)
new_scale = _A / _B
new_scale = reshape(new_scale)
m1, e1 = batch_frexp(new_scale)
output1 = wx_int.type(torch.double) * m1.type(torch.double)
output1 = torch.round(output1 / (2.0**e1))
output = output1 + output
return torch.clamp(output.type(torch.float), -n - 1, n)
@staticmethod
def backward(ctx, grad_output):
identity_grad = None
if ctx.identity is not None:
identity_grad = grad_output.clone() / ctx.z_scaling_factor
return (
grad_output.clone() / ctx.z_scaling_factor,
None,
None,
None,
None,
identity_grad,
None,
)
| 33.934848 | 105 | 0.603831 |
f7191a9344d5198ccde86f8f184716fe9107a381 | 5,646 | py | Python | textacy/text_utils.py | tbsexton/textacy | 964614213c7261f91f09c106334269388d45f790 | [
"Apache-2.0"
] | null | null | null | textacy/text_utils.py | tbsexton/textacy | 964614213c7261f91f09c106334269388d45f790 | [
"Apache-2.0"
] | null | null | null | textacy/text_utils.py | tbsexton/textacy | 964614213c7261f91f09c106334269388d45f790 | [
"Apache-2.0"
] | null | null | null | """
Text Utils
----------
Set of small utility functions that take text strings as input.
"""
import logging
import re
from typing import Iterable, Optional, Set, Tuple
from . import constants
LOGGER = logging.getLogger(__name__)
def is_acronym(token: str, exclude: Optional[Set[str]] = None) -> bool:
"""
Pass single token as a string, return True/False if is/is not valid acronym.
Args:
token: Single word to check for acronym-ness
exclude: If technically valid but not actually good acronyms are known in advance,
pass them in as a set of strings; matching tokens will return False.
Returns:
Whether or not ``token`` is an acronym.
"""
# exclude certain valid acronyms from consideration
if exclude and token in exclude:
return False
# don't allow empty strings
if not token:
return False
# don't allow spaces
if " " in token:
return False
# 2-character acronyms can't have lower-case letters
if len(token) == 2 and not token.isupper():
return False
# acronyms can't be all digits
if token.isdigit():
return False
# acronyms must have at least one upper-case letter or start/end with a digit
if not any(char.isupper() for char in token) and not (
token[0].isdigit() or token[-1].isdigit()
):
return False
# acronyms must have between 2 and 10 alphanumeric characters
if not 2 <= sum(1 for char in token if char.isalnum()) <= 10:
return False
# only certain combinations of letters, digits, and '&/.-' allowed
if not constants.RE_ACRONYM.match(token):
return False
return True
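# Usage sketch (the True case also assumes the token passes constants.RE_ACRONYM):
#   >>> is_acronym("NASA")
#   True
#   >>> is_acronym("nasa")   # no upper-case letter and no leading/trailing digit
#   False
#   >>> is_acronym("NASA", exclude={"NASA"})
#   False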
def keyword_in_context(
text: str,
keyword: str,
*,
ignore_case: bool = True,
window_width: int = 50,
print_only: bool = True,
) -> Optional[Iterable[Tuple[str, str, str]]]:
"""
Search for ``keyword`` in ``text`` via regular expression, return or print strings
spanning ``window_width`` characters before and after each occurrence of keyword.
Args:
text: Text in which to search for ``keyword``.
keyword: Technically, any valid regular expression string should work,
but usually this is a single word or short phrase: "spam", "spam and eggs";
to account for variations, use regex: "[Ss]pam (and|&) [Ee]ggs?"
.. note:: If keyword contains special characters, be sure to escape them!
ignore_case: If True, ignore letter case in ``keyword`` matching.
window_width: Number of characters on either side of ``keyword``
to include as "context".
print_only: If True, print out all results with nice formatting;
if False, return all (pre, kw, post) matches as generator of raw strings.
Yields:
Next 3-tuple of prior context, the match itself, and posterior context.
"""
flags = re.IGNORECASE if ignore_case is True else 0
if print_only is True:
for match in re.finditer(keyword, text, flags=flags):
line = "{pre} {kw} {post}".format(
pre=text[max(0, match.start() - window_width) : match.start()].rjust(
window_width
),
kw=match.group(),
post=text[match.end() : match.end() + window_width].ljust(window_width),
)
print(line)
else:
for match in re.finditer(keyword, text, flags=flags):
yield (
text[max(0, match.start() - window_width) : match.start()],
match.group(),
text[match.end() : match.end() + window_width],
)
KWIC = keyword_in_context
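# Usage sketch for keyword_in_context / KWIC: with print_only=False the matches are
# returned lazily as (pre, match, post) triples instead of being printed, e.g.
#   triples = list(keyword_in_context("Take spam and eggs, then more spam.", "spam",
#                                     window_width=10, print_only=False))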
"""Alias of :func:`keyword_in_context <textacy.text_utils.keyword_in_context>`."""
def clean_terms(terms: Iterable[str]) -> Iterable[str]:
"""
Clean up a sequence of single- or multi-word strings: strip leading/trailing
junk chars, handle dangling parens and odd hyphenation, etc.
Args:
terms: Sequence of terms such as "presidency", "epic failure",
or "George W. Bush" that may be _unclean_ for whatever reason.
Yields:
Next term in `terms` but with the cruft cleaned up, excluding terms
that were _entirely_ cruft
Warning:
Terms with (intentionally) unusual punctuation may get "cleaned"
into a form that changes or obscures the original meaning of the term.
"""
# get rid of leading/trailing junk characters
terms = (constants.RE_LEAD_TAIL_CRUFT_TERM.sub("", term) for term in terms)
terms = (constants.RE_LEAD_HYPHEN_TERM.sub(r"\1", term) for term in terms)
# handle dangling/backwards parens, don't allow '(' or ')' to appear without the other
terms = (
""
if term.count(")") != term.count("(") or term.find(")") < term.find("(")
else term
if "(" not in term
else constants.RE_DANGLING_PARENS_TERM.sub(r"\1\2\3", term)
for term in terms
)
# handle oddly separated hyphenated words
terms = (
term
if "-" not in term
else constants.RE_NEG_DIGIT_TERM.sub(
r"\1\2", constants.RE_WEIRD_HYPHEN_SPACE_TERM.sub(r"\1", term)
)
for term in terms
)
# handle oddly separated apostrophe'd words
terms = (
constants.RE_WEIRD_APOSTR_SPACE_TERM.sub(r"\1\2", term) if "'" in term else term
for term in terms
)
# normalize whitespace
terms = (constants.RE_NONBREAKING_SPACE.sub(" ", term).strip() for term in terms)
for term in terms:
if re.search(r"\w", term):
yield term
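# Usage sketch: clean_terms is a generator, so materialise it with list(), e.g.
#   list(clean_terms(["the presidency ", "- epic failure", "George W. Bush"]))
# The exact cleaned output depends on the regexes defined in textacy.constants.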
| 35.734177 | 90 | 0.626993 |
f7192d36362e57de19098cfbb44d604a21beea70 | 27 | py | Python | src/user/__init__.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/user/__init__.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | src/user/__init__.py | aleksandrgordienko/melissa-quiz | 49b165acc9aae0ad84cf751cbeb4f6a27dd5ab0f | [
"MIT"
] | null | null | null | from user.user import User
| 13.5 | 26 | 0.814815 |
f7194f875486f67d1eadb26fc5e87f6bfaed4596 | 6,237 | py | Python | detect/image_detector.py | Prasad9/Detect-Flags-SSD | c0d662bde99ed8df33d72bd06d61d5eb869d31a5 | [
"MIT"
] | 13 | 2017-11-08T07:09:13.000Z | 2022-03-28T07:09:47.000Z | detect/image_detector.py | Prasad9/Detect-Flags-SSD | c0d662bde99ed8df33d72bd06d61d5eb869d31a5 | [
"MIT"
] | 3 | 2018-03-08T04:30:19.000Z | 2019-01-03T15:47:24.000Z | detect/image_detector.py | Prasad9/Detect-Flags-SSD | c0d662bde99ed8df33d72bd06d61d5eb869d31a5 | [
"MIT"
] | 5 | 2018-01-15T15:26:44.000Z | 2021-08-18T08:02:51.000Z | from __future__ import print_function
import mxnet as mx
import numpy as np
from timeit import default_timer as timer
from dataset.iterator import DetTestImageIter
import cv2
class ImageDetector(object):
"""
SSD detector which hold a detection network and wraps detection API
Parameters:
----------
symbol : mx.Symbol
detection network Symbol
model_prefix : str
name prefix of trained model
epoch : int
load epoch of trained model
data_shape : int
input data resize shape
mean_pixels : tuple of float
(mean_r, mean_g, mean_b)
batch_size : int
run detection with batch size
ctx : mx.ctx
device to use, if None, use mx.cpu() as default context
"""
def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
classes, thresh = 0.6, plot_confidence = True, batch_size=1, ctx=None):
self.ctx = ctx
if self.ctx is None:
self.ctx = mx.cpu()
load_symbol, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
if symbol is None:
symbol = load_symbol
self.mod = mx.mod.Module(symbol, label_names=None, context=ctx)
self.data_shape = data_shape
self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])
self.mod.set_params(args, auxs)
self.data_shape = data_shape
self.mean_pixels = mean_pixels
self.classes = classes
self.colors = []
self.fill_random_colors_int()
self.thresh = thresh
self.plot_confidence = plot_confidence
def fill_random_colors(self):
import random
for i in range(len(self.classes)):
self.colors.append((random.random(), random.random(), random.random()))
#print(self.colors)
def fill_random_colors_int(self):
import random
for i in range(len(self.classes)):
self.colors.append((random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)))
#print(self.colors)
def detect(self, det_iter, show_timer=False):
"""
detect all images in iterator
Parameters:
----------
det_iter : DetIter
iterator for all testing images
show_timer : Boolean
whether to print out detection exec time
Returns:
----------
list of detection results
"""
num_images = det_iter._size
result = []
detections = []
#if not isinstance(det_iter, mx.io.PrefetchingIter):
# det_iter = mx.io.PrefetchingIter(det_iter)
start = timer()
for pred, _, _ in self.mod.iter_predict(det_iter):
detections.append(pred[0].asnumpy())
time_elapsed = timer() - start
if show_timer:
print("Detection time for {} images: {:.4f} sec".format(num_images, time_elapsed))
for output in detections:
for i in range(output.shape[0]):
det = output[i, :, :]
res = det[np.where(det[:, 0] >= 0)[0]]
result.append(res)
resized_img = det_iter.current_data()
return result, resized_img
def im_detect(self, img, show_timer=False):
"""
wrapper for detecting multiple images
Parameters:
----------
im_list : list of str
image path or list of image paths
root_dir : str
directory of input images, optional if image path already
has full directory information
extension : str
image extension, eg. ".jpg", optional
Returns:
----------
list of detection results in format [det0, det1...], det is in
format np.array([id, score, xmin, ymin, xmax, ymax]...)
"""
im_list = [img]
test_iter = DetTestImageIter(im_list, 1, self.data_shape, self.mean_pixels)
return self.detect(test_iter, show_timer)
def plot_rects(self, img, dets):
img_shape = img.shape
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
#print('Score is {}, class {}'.format(score, cls_id))
if score > self.thresh:
xmin = int(dets[i, 2] * img_shape[1])
ymin = int(dets[i, 3] * img_shape[0])
xmax = int(dets[i, 4] * img_shape[1])
ymax = int(dets[i, 5] * img_shape[0])
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)
class_name = self.classes[cls_id]
cv2.putText(img, class_name, (xmin, ymin), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 4)
#print('Class id = {}, Score = {}, Country = {}, rect = ({}, {}, {}, {})'.format(cls_id, score, class_name, xmin, ymin, xmax, ymax))
def detect_and_visualize_image(self, img, show_timer=False):
"""
wrapper for im_detect and visualize_detection
Parameters:
----------
im_list : list of str or str
image path or list of image paths
root_dir : str or None
directory of input images, optional if image path already
has full directory information
extension : str or None
image extension, eg. ".jpg", optional
Returns:
----------
"""
dets, resized_img = self.im_detect(img, show_timer=show_timer)
resized_img = resized_img.asnumpy()
resized_img /= 255.0
for k, det in enumerate(dets):
self.plot_rects(resized_img, det)
return resized_img
def scale_and_plot_rects(self, img, dets):
img_shape = img.shape
for i in range(dets.shape[0]):
cls_id = int(dets[i, 0])
if cls_id >= 0:
score = dets[i, 1]
#print('Score is {}, class {}'.format(score, cls_id))
if score > self.thresh:
xmin = int(dets[i, 2] * img_shape[1])
ymin = int(dets[i, 3] * img_shape[0])
xmax = int(dets[i, 4] * img_shape[1])
ymax = int(dets[i, 5] * img_shape[0])
cv2.rectangle(img, (xmin, ymin), (xmax, ymax), self.colors[cls_id], 4)
class_name = self.classes[cls_id]
cv2.putText(img, class_name, (xmin, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 255), 3)
if self.plot_confidence:
score_color = (0, 255, 0) if score > 0.5 else (255, 0, 0)
cv2.putText(img, '{:.3f}'.format(score), (xmax - 60, ymin - 15), cv2.FONT_HERSHEY_SIMPLEX, 1, score_color, 1)
def detect_and_layover_image(self, img, show_timer=False):
"""
wrapper for im_detect and visualize_detection
Parameters:
----------
im_list : list of str or str
image path or list of image paths
root_dir : str or None
directory of input images, optional if image path already
has full directory information
extension : str or None
image extension, eg. ".jpg", optional
Returns:
----------
"""
dets, _ = self.im_detect(img, show_timer=show_timer)
for k, det in enumerate(dets):
self.scale_and_plot_rects(img, det)
return img
| 29.842105 | 137 | 0.674683 |
f7194fe7656b09b6c529b0342d12157fb1da984f | 710 | py | Python | tests/apps/minimal2/application.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | null | null | null | tests/apps/minimal2/application.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 6 | 2016-11-01T18:42:34.000Z | 2020-11-16T16:52:14.000Z | tests/apps/minimal2/application.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 1 | 2020-01-22T18:20:46.000Z | 2020-01-22T18:20:46.000Z | from os import path
from blazeutils import prependsitedir
from blazeweb.application import WSGIApp
from blazeweb.middleware import full_wsgi_stack
from minimal2.config import settings as settingsmod
from blazeweb.scripting import application_entry
# make sure our base module gets put on the path
try:
import minimal2 # noqa
except ImportError:
prependsitedir(path.dirname(settingsmod.basedir), 'apps')
def make_wsgi(profile='Default', use_session=True):
app = WSGIApp(settingsmod, profile)
if not use_session:
app.settings.beaker.enabled = False
return full_wsgi_stack(app)
def script_entry():
application_entry(make_wsgi)
if __name__ == '__main__':
script_entry()
| 25.357143 | 61 | 0.773239 |
f71950d1cafe3ade67ae0b9180b0da8119152a85 | 4,293 | py | Python | experiments/steven/disentanglement/pointmass/disentanglement_rig.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/steven/disentanglement/pointmass/disentanglement_rig.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/steven/disentanglement/pointmass/disentanglement_rig.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | import os.path as osp
import torch.nn.functional as F
import multiworld.envs.mujoco as mwmj
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.launcher_util import run_experiment
from rlkit.launchers.experiments.disentanglement.launcher import \
disentangled_grill_her_twin_sac_experiment
from rlkit.torch.vae.conv_vae import imsize48_default_architecture
if __name__ == "__main__":
variant = dict(
env_id='Point2DEnv-Train-Axis-Eval-Everything-Images-v0',
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
encoder_kwargs=dict(
hidden_sizes=[400, 300],
hidden_activation=F.tanh,
),
twin_sac_trainer_kwargs=dict(
reward_scale=1,
discount=0.99,
target_update_period=1,
use_automatic_entropy_tuning=True,
),
td3_trainer_kwargs=dict(
tau=1e-3,
),
max_path_length=100,
algo_kwargs=dict(
batch_size=256,
num_epochs=50,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=1000,
),
replay_buffer_kwargs=dict(
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
max_size=int(1e6),
ob_keys_to_save=[
'latent_observation',
'latent_desired_goal',
'latent_achieved_goal',
'state_achieved_goal',
'state_desired_goal',
'state_observation',
],
goal_keys=['latent_desired_goal', 'state_desired_goal'],
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
achieved_goal_key='latent_achieved_goal',
vae_exploration_goal_sampling_mode='env',
vae_evaluation_goal_sampling_mode='env',
base_env_exploration_goal_sampling_mode='train',
base_env_evaluation_goal_sampling_mode='test',
vectorized=True,
disentangled_qf_kwargs=dict(
),
vae_wrapped_env_kwargs=dict(
norm_order=1,
reward_params=dict(
type='vectorized_latent_distance',
norm_order=1,
),
),
use_vf_to_compute_policy=True,
use_special_q_function=True,
latent_dim=2,
vae_n_vae_training_kwargs=dict(
vae_class='spatialVAE',
vae_kwargs=dict(
input_channels=3,
),
vae_trainer_kwargs=dict(
lr=1e-3,
beta=0,
),
vae_train_epochs=50,
num_image_examples=30000,
vae_architecture=imsize48_default_architecture,
),
# vae_path="logs/02-25-disentangle-images-relu/02-25-disentangle-images-relu_2020_02_25_12_59_17_id000--s4248/vae.pkl",
save_video=True,
save_video_kwargs=dict(
save_video_period=10,
imsize=48,
),
)
search_space = {
'disentangled_qf_kwargs.encode_state': [True],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds = 1
mode = 'local'
exp_prefix = '{}'.format(
__file__.replace('/', '-').replace('_', '-').split('.')[0]
)
n_seeds = 2
mode = 'local'
exp_prefix = 'disentangle-extrapolate-vectorized-3'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for _ in range(n_seeds):
run_experiment(
disentangled_grill_her_twin_sac_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
use_gpu=True,
num_exps_per_instance=3,
gcp_kwargs=dict(
zone='us-east1-c',
gpu_kwargs=dict(
gpu_model='nvidia-tesla-k80',
num_gpu=1,
)
),
time_in_mins=int(2.5*24*60),
)
| 32.278195 | 127 | 0.575355 |
f719631ce5568ca0573b1aff26b681add708c145 | 5,186 | py | Python | lib/matplotlib/backends/qt_compat.py | pmarshwx/matplotlib | 12be528dbf2114f7c25abf60de8100cb2d4494af | [
"MIT",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/backends/qt_compat.py | pmarshwx/matplotlib | 12be528dbf2114f7c25abf60de8100cb2d4494af | [
"MIT",
"BSD-3-Clause"
] | null | null | null | lib/matplotlib/backends/qt_compat.py | pmarshwx/matplotlib | 12be528dbf2114f7c25abf60de8100cb2d4494af | [
"MIT",
"BSD-3-Clause"
] | null | null | null | """ A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the varible if of the same major QT version. Note that
# ETS requires the version 2 of PyQt4, which is not the platform
# default for Python 2.x.
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
else:
QT_RC_MAJOR_VERSION = 4
QT_API = None
if (QT_API_ENV is not None):
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
else:
QT_API = rcParams['backend.qt4']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
QT_API = QT_API_PYSIDE
cond = ("Could not import sip; falling back on PySide\n"
"in place of PyQt4 or PyQt5.\n")
verbose.report(cond, 'helpful')
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
else: # PyQt5 API
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
else: # try importing pyside
try:
from PySide import QtCore, QtGui, __version__, __version_info__
except ImportError:
raise ImportError(
"Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
"or PySide package to be installed, but it was not found.")
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
Here I've opted to simple copy QtGui into QtWidgets as that
achieves the same result as copying over the objects, and will
continue to work if other objects are used.
'''
QtWidgets = QtGui
| 33.895425 | 79 | 0.642885 |
f7196801f8fa58470aa03ad73efa1012011af858 | 28,195 | py | Python | sacla/scripts/backups/sacla3_Chip_Manager_v7BAK.py | beamline-i24/DiamondChips | 02fb58a95ad2c1712c41b641eb5f197d688c54c3 | [
"Apache-2.0"
] | null | null | null | sacla/scripts/backups/sacla3_Chip_Manager_v7BAK.py | beamline-i24/DiamondChips | 02fb58a95ad2c1712c41b641eb5f197d688c54c3 | [
"Apache-2.0"
] | null | null | null | sacla/scripts/backups/sacla3_Chip_Manager_v7BAK.py | beamline-i24/DiamondChips | 02fb58a95ad2c1712c41b641eb5f197d688c54c3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import pv, os, re, sys
import math, time, string
import numpy as np
from time import sleep
from ca import caput, caget
import logging as lg
import sacla3_Chip_StartUp_v7 as startup
import sacla3_Chip_Mapping_v7 as mapping
lg.basicConfig(format='%(asctime)s %(levelname)s: \t%(message)s',level=lg.DEBUG, filename='SACLA3v7.log')
##############################################
# MANAGER MANAGER MANAGER MANAGER MANAGER #
# This version last edited 03Sep2017 by DAS #
# Prep for SACLA3 #
##############################################
def initialise():
lg.info('INITIALISED')
lg.warning('INITIALISED')
lg.debug('INITIALISED')
caput(pv.me14e_stage_x + '.VMAX', 15)
caput(pv.me14e_stage_y + '.VMAX', 15)
caput(pv.me14e_stage_z + '.VMAX', 15)
caput(pv.me14e_filter + '.VMAX', 15)
caput(pv.me14e_stage_x + '.VELO', 15)
caput(pv.me14e_stage_y + '.VELO', 15)
caput(pv.me14e_stage_z + '.VELO', 15)
caput(pv.me14e_filter + '.VELO', 15)
caput(pv.me14e_stage_x + '.ACCL', 0.01)
caput(pv.me14e_stage_y + '.ACCL', 0.01)
caput(pv.me14e_stage_z + '.ACCL', 0.01)
caput(pv.me14e_filter + '.ACCL', 0.01)
caput(pv.me14e_stage_x + '.HLM', 30)
caput(pv.me14e_stage_x + '.LLM', -30)
caput(pv.me14e_stage_y + '.HLM', 30)
caput(pv.me14e_stage_y + '.LLM', -30)
caput(pv.me14e_stage_z + '.HLM', 5.1)
caput(pv.me14e_stage_z + '.LLM', -4.1)
caput(pv.me14e_filter + '.HLM', 0.1)
caput(pv.me14e_filter + '.LLM', -45.0)
caput('ME14E-MO-IOC-01:GP1', 0)
caput('ME14E-MO-IOC-01:GP2', 0)
print 'Clearing'
for i in range(3, 100):
pvar = 'ME14E-MO-IOC-01:GP' + str(i)
val = caput(pvar, 1)
sys.stdout.write('.')
sys.stdout.flush()
print '\nDONT FORGET TO DO THIS: export EPICS_CA_ADDR_LIST=172.23.190.255'
print 'DONT FORGET TO DO THIS: export EPICS_CA_AUTO_ADDR_LIST=NO'
print 'Initialisation Complete'
def write_parameter_file():
print '\n\n', 10*'set', '\n'
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
param_fid = 'parameters.txt'
print 'Writing Parameter File\n', param_path+param_fid
lg.info('Writing Parameter File\n', param_path+param_fid)
lg.info('CHIP_MANAGER\twrite_parameter_file:Writing')
f = open(param_path + param_fid,'w')
chip_name = caget(pv.me14e_chip_name)
f.write('chip_name \t%s\n' %chip_name)
print 'chip_name:', chip_name
#f.write('path \t%s\n' %path)
#print 'path:', path
protein_name = caget(pv.me14e_filepath)
f.write('protein_name \t%s\n' %protein_name)
print 'protein_name:', protein_name
n_exposures = caget(pv.me14e_gp3)
f.write('n_exposures \t%s\n' %n_exposures)
print 'n_exposures', n_exposures
chip_type = caget(pv.me14e_gp1)
#### Hack for sacla3 to bismuth chip type for oxford inner
if str(chip_type) =='3':
chip_type = '1'
f.write('chip_type \t%s\n' %chip_type)
print 'chip_type', chip_type
map_type = caget(pv.me14e_gp2)
f.write('map_type \t%s\n' %map_type)
print 'map_type', map_type
f.close()
print '\n', 10*'set', '\n\n'
def define_current_chip(chipid):
load_stock_map('clear')
"""
Not sure what this is for:
print 'Setting Mapping Type to Lite'
caput(pv.me14e_gp2, 1)
"""
chip_type = caget(pv.me14e_gp1)
print chip_type, chipid
if chipid == 'toronto':
caput(pv.me14e_gp1, 0)
elif chipid == 'oxford':
caput(pv.me14e_gp1, 1)
elif chipid == 'hamburg':
caput(pv.me14e_gp1, 2)
elif chipid == 'hamburgfull':
caput(pv.me14e_gp1, 2)
elif chipid == 'bismuth1':
caput(pv.me14e_gp1, 3)
elif chipid == 'bismuth2':
caput(pv.me14e_gp1, 4)
elif chipid == 'regina':
caput(pv.me14e_gp1, 5)
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path + chipid + '.pvar', 'r')
for line in f.readlines():
s = line.rstrip('\n')
print s
if line.startswith('#'):
continue
caput(pv.me14e_pmac_str, s)
print param_path + chipid + '.chip'
print 10*'Done '
def save_screen_map():
#litemap_path = '/dls_sw/i24/scripts/fastchips/litemaps/'
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
print '\n\nSaving', litemap_path + 'currentchip.map'
f = open(litemap_path + 'currentchip.map','w')
print 'Printing only blocks with block_val == 1'
for x in range(1, 82):
block_str = 'ME14E-MO-IOC-01:GP%i' %(x+10)
block_val = caget(block_str)
if block_val == 1:
print block_str, block_val
line = '%02dstatus P3%02d1 \t%s\n' %(x, x, block_val)
f.write(line)
f.close()
print 10*'Done '
return 0
def upload_parameters(chipid):
if chipid == 'toronto':
caput(pv.me14e_gp1, 0)
width = 9
elif chipid == 'oxford':
caput(pv.me14e_gp1, 1)
width = 8
elif chipid == 'hamburg':
caput(pv.me14e_gp1, 2)
width = 3
elif chipid == 'bismuth1':
caput(pv.me14e_gp1, 3)
width = 1
elif chipid == 'bismuth2':
caput(pv.me14e_gp1, 4)
width = 7
elif chipid == 'regina':
caput(pv.me14e_gp1, 5)
width = 7
#litemap_path = '/dls_sw/i24/scripts/fastchips/litemaps/'
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(litemap_path + 'currentchip.map','r')
print 'chipid', chipid
print width
x = 1
for line in f.readlines()[:width**2]:
cols = line.split( )
pvar = cols[1]
value = cols[2]
s = pvar +'='+ value
if value != '1':
s2 = pvar + ' '
sys.stdout.write(s2)
else:
sys.stdout.write(s+' ')
sys.stdout.flush()
if x == width:
print
x = 1
else:
x += 1
caput(pv.me14e_pmac_str, s)
sleep(0.02)
print
print 'Setting Mapping Type to Lite'
caput(pv.me14e_gp2, 1)
print 10*'Done '
def upload_full():
#fullmap_path = '/dls_sw/i24/scripts/fastchips/fullmaps/'
fullmap_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(fullmap_path + 'currentchip.full', 'r').readlines()
for x in range(len(f) / 2):
pmac_list = []
for i in range(2):
pmac_list.append(f.pop(0).rstrip('\n'))
writeline = " ".join(pmac_list)
print writeline
caput(pv.me14e_pmac_str, writeline)
sleep(0.02)
print 10*'Done '
def load_stock_map(map_choice):
print 'Please wait, adjusting lite map'
#
r33 = [19,18,17,26,31,32,33,24,25]
r55 = [9,10,11,12,13,16,27,30,41,40,39,38,37,34,23,20] + r33
r77 = [7,6,5,4,3,2,1,14,15,28,29,42,43,44,45,46,47,48,49,36,35,22,21,8] + r55
#
h33 = [3,2,1,6,7,8,9,4,5]
x33 = [31,32,33,40,51,50,49,42,41]
x55 = [25,24,23,22,21,34,39,52,57,58,59,60,61,48,43,30] + x33
x77 = [11,12,13,14,15,16,17,20,35,38,53,56,71,70,69,68,67,66,65,62,47,44,29,26] + x55
x99 = [9,8,7,6,5,4,3,2,1,18,19,36,37,54,55,72,73,74,75,76,77,78,79,80,81,64,63,46,45,28,27,10] + x77
x44 = [22,21,20,19,30,35,46,45,44,43,38,27,28,29,36,37]
x49 = [x+1 for x in range(49)]
x66 = [10,11,12,13,14,15,18,31,34,47,50,51,52,53,54,55,42,39,26,23] + x44
x88 = [8,7,6,5,4,3,2,1,16,17,32,33,48,49,64,63,62,61,60,59,58,57,56,41,40,25,24,9] + x66
map_dict = {}
map_dict['clear']= [1]
#
map_dict['r33'] = r33
map_dict['r55'] = r55
map_dict['r77'] = r77
#
map_dict['h33'] = h33
#
map_dict['x33'] = x33
map_dict['x44'] = x44
map_dict['x49'] = x49
map_dict['x55'] = x55
map_dict['x66'] = x66
map_dict['x77'] = x77
map_dict['x88'] = x88
map_dict['x99'] = x99
print 'Clearing'
for i in range(1, 82):
pvar = 'ME14E-MO-IOC-01:GP' + str(i + 10)
caput(pvar, 0)
sys.stdout.write('.')
sys.stdout.flush()
print '\nmap cleared'
print 'loading map_choice', map_choice
for i in map_dict[map_choice]:
pvar = 'ME14E-MO-IOC-01:GP' + str(i + 10)
caput(pvar, 1)
print 10*'Done '
def load_lite_map():
load_stock_map('clear')
toronto_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07', 'A8':'08', 'A9':'09'
,'B1':'18', 'B2':'17', 'B3':'16', 'B4':'15', 'B5':'14', 'B6':'13','B7':'12', 'B8':'11', 'B9':'10'
,'C1':'19', 'C2':'20', 'C3':'21', 'C4':'22', 'C5':'23', 'C6':'24','C7':'25', 'C8':'26', 'C9':'27'
,'D1':'36', 'D2':'35', 'D3':'34', 'D4':'33', 'D5':'32', 'D6':'31','D7':'30', 'D8':'29', 'D9':'28'
,'E1':'37', 'E2':'38', 'E3':'39', 'E4':'40', 'E5':'41', 'E6':'42','E7':'43', 'E8':'44', 'E9':'45'
,'F1':'54', 'F2':'53', 'F3':'52', 'F4':'51', 'F5':'50', 'F6':'49','F7':'48', 'F8':'47', 'F9':'46'
,'G1':'55', 'G2':'56', 'G3':'57', 'G4':'58', 'G5':'59', 'G6':'60','G7':'61', 'G8':'62', 'G9':'63'
,'H1':'72', 'H2':'71', 'H3':'70', 'H4':'69', 'H5':'68', 'H6':'67','H7':'66', 'H8':'65', 'H9':'64'
,'I1':'73', 'I2':'74', 'I3':'75', 'I4':'76', 'I5':'77', 'I6':'78','I7':'79', 'I8':'80', 'I9':'81'}
#Oxford_block_dict is wrong (columns and rows need to flip) added in script below to generate it automatically however kept this for backwards compatiability/reference
oxford_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07', 'A8':'08'
,'B1':'16', 'B2':'15', 'B3':'14', 'B4':'13', 'B5':'12', 'B6':'11','B7':'10', 'B8':'09'
,'C1':'17', 'C2':'18', 'C3':'19', 'C4':'20', 'C5':'21', 'C6':'22','C7':'23', 'C8':'24'
,'D1':'32', 'D2':'31', 'D3':'30', 'D4':'29', 'D5':'28', 'D6':'27','D7':'26', 'D8':'25'
,'E1':'33', 'E2':'34', 'E3':'35', 'E4':'36', 'E5':'37', 'E6':'38','E7':'39', 'E8':'40'
,'F1':'48', 'F2':'47', 'F3':'46', 'F4':'45', 'F5':'44', 'F6':'43','F7':'42', 'F8':'41'
,'G1':'49', 'G2':'50', 'G3':'51', 'G4':'52', 'G5':'53', 'G6':'54','G7':'55', 'G8':'56'
,'H1':'64', 'H2':'63', 'H3':'62', 'H4':'61', 'H5':'60', 'H6':'59','H7':'58', 'H8':'57'}
regina_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03', 'A4':'04', 'A5':'05', 'A6':'06','A7':'07'
,'B1':'14', 'B2':'13', 'B3':'12', 'B4':'11', 'B5':'10', 'B6':'09','B7':'08'
,'C1':'15', 'C2':'16', 'C3':'17', 'C4':'18', 'C5':'19', 'C6':'20','C7':'21'
,'D1':'28', 'D2':'27', 'D3':'26', 'D4':'25', 'D5':'24', 'D6':'23','D7':'22'
,'E1':'29', 'E2':'30', 'E3':'31', 'E4':'32', 'E5':'33', 'E6':'34','E7':'35'
,'F1':'42', 'F2':'41', 'F3':'40', 'F4':'39', 'F5':'38', 'F6':'37','F7':'36'
,'G1':'43', 'G2':'44', 'G3':'45', 'G4':'46', 'G5':'47', 'G6':'48','G7':'49'}
hamburg_block_dict = {\
'A1':'01', 'A2':'02', 'A3':'03'
,'B1':'06', 'B2':'05', 'B3':'04'
,'C1':'07', 'C2':'08', 'C3':'09'}
chip_type = caget(pv.me14e_gp1)
if chip_type == 0:
print 'Toronto Block Order'
block_dict = toronto_block_dict
elif chip_type == 1:
print 'Oxford Block Order'
#block_dict = oxford_block_dict
rows = ['A','B','C','D','E','F','G','H']
columns = list(range(1,9))
btn_names = {}
flip = True
for x, column in enumerate(columns):
for y,row in enumerate(rows):
i=x*8+y
if i%8 == 0 and flip == False:
flip = True
z = 8 - (y+1)
elif i%8 == 0 and flip == True:
flip = False
z = y
elif flip == False:
z = y
elif flip == True:
z = 8 - (y+1)
else:
print('something is wrong with chip grid creation')
break
button_name = str(row)+str(column)
lab_num = x*8+z
label='%02.d'%(lab_num+1)
btn_names[button_name] = label
#print button_name, btn_names[button_name]
block_dict = btn_names
elif chip_type == 2:
print 'Hamburg Block Order'
block_dict = hamburg_block_dict
elif chip_type == 5:
print 'Regina Block Order'
block_dict = regina_block_dict
#litemap_path = '/dls_sw/i24/scripts/fastchips/litemaps/'
litemap_path = '/localhome/local/Documents/sacla/parameter_files/'
litemap_fid = str(caget(pv.me14e_gp5)) + '.lite'
print 'opening', litemap_path + litemap_fid
f = open(litemap_path + litemap_fid, 'r')
print 'please wait, loading LITE map'
for line in f.readlines():
entry = line.split()
block_name = entry[0]
yesno = entry[1]
block_num = block_dict[block_name]
pvar = 'ME14E-MO-IOC-01:GP' + str(int(block_num) + 10)
print block_name, yesno, pvar
caput(pvar, yesno)
print 10*'Done '
def load_full_map(location ='SACLA'):
if location == 'i24':
chip_name, visit, sub_dir, n_exposures, chip_type, map_type = startup.scrape_parameter_file(location)
else:
chip_name, sub_dir, n_exposures, chip_type, map_type = startup.scrape_parameter_file(location)
#fullmap_path = '/dls_sw/i24/scripts/fastchips/fullmaps/'
fullmap_path = '/localhome/local/Documents/sacla/parameter_files/'
fullmap_fid = fullmap_path + str(caget(pv.me14e_gp5)) + '.spec'
print 'opening', fullmap_fid
mapping.plot_file(fullmap_fid, chip_type)
print '\n\n', 10*'PNG '
mapping.convert_chip_to_hex(full_map_fid, chip_type)
os.system("cp %s %s" % (fullmap_fid[:-4]+'full', fullmap_path+'currentchip.full'))
print 10*'Done ', '\n'
def moveto(place):
print 5 * (place + ' ')
chip_type = caget(pv.me14e_gp1)
print 'CHIP TYPE', chip_type
if chip_type == 0:
print 'Toronto Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, +18.975)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, +21.375)
elif chip_type == 1:
print 'Oxford Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, 25.40)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 25.40)
elif chip_type == 2:
print 'Hamburg Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
#caput(pv.me14e_stage_x, +17.16)
caput(pv.me14e_stage_x, +24.968)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
#caput(pv.me14e_stage_y, -26.49)
caput(pv.me14e_stage_y, +24.968)
elif chip_type == 3:
print 'Oxford Inner Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, 24.60)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 24.60)
elif chip_type == 5:
print 'Regina Move'
if place == 'origin':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
if place == 'f1':
caput(pv.me14e_stage_x, +17.175)
caput(pv.me14e_stage_y, 0.0)
if place == 'f2':
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, +17.175)
else:
print 'Unknown chip_type move'
# Non Chip Specific Move
if place == 'zero':
caput(pv.me14e_pmac_str, '!x0y0z0')
elif place == 'yag':
caput(pv.me14e_stage_x, 1.0)
caput(pv.me14e_stage_y, 1.0)
caput(pv.me14e_stage_z, 1.0)
elif place == 'load_position':
print 'load position'
caput(pv.me14e_filter, -25)
caput(pv.me14e_stage_x, -25.0)
caput(pv.me14e_stage_y, -25.0)
caput(pv.me14e_stage_z, 0.0)
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
#caput(pv.absb_mp_select, 'Robot')
#caput(pv.ap1_mp_select, 'Robot')
#caput(pv.blight_mp_select, 'Out')
#caput(pv.det_z, 1480)
elif place == 'collect_position':
print 'collect position'
caput(pv.me14e_filter, 25)
caput(pv.me14e_stage_x, 0.0)
caput(pv.me14e_stage_y, 0.0)
caput(pv.me14e_stage_z, 0.0)
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
#caput(pv.absb_mp_select, 'Data Collection')
#caput(pv.ap1_mp_select, 'In')
#caput(pv.blight_mp_select, 'In')
elif place == 'lightin':
print 'light in'
caput(pv.me14e_filter, 25)
elif place == 'lightout':
print 'light out'
caput(pv.me14e_filter, -25)
elif place == 'flipperin':
##### nb need M508=100 M509 =150 somewhere
caput(pv.me14e_pmac_str, 'M512=0 M511=1')
elif place == 'flipperout':
caput(pv.me14e_pmac_str, ' M512=1 M511=1')
def scrape_mtr_directions():
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path + 'motor_direction.txt', 'r')
mtr1_dir, mtr2_dir, mtr3_dir = 1,1,1
for line in f.readlines():
if line.startswith('mtr1'):
mtr1_dir = float(int(line.split('=')[1]))
elif line.startswith('mtr2'):
mtr2_dir = float(int(line.split('=')[1]))
elif line.startswith('mtr3'):
mtr3_dir = float(int(line.split('=')[1]))
else:
continue
f.close()
return mtr1_dir, mtr2_dir, mtr3_dir
def fiducial(point):
scale = 10000.0
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
mtr1_dir, mtr2_dir, mtr3_dir = scrape_mtr_directions()
rbv_1 = caget(pv.me14e_stage_x + '.RBV')
rbv_2 = caget(pv.me14e_stage_y + '.RBV')
rbv_3 = caget(pv.me14e_stage_z + '.RBV')
raw_1 = caget(pv.me14e_stage_x + '.RRBV')
raw_2 = caget(pv.me14e_stage_y + '.RRBV')
raw_3 = caget(pv.me14e_stage_z + '.RRBV')
"""
June 8th 2017 change from this to rbv
f_x = (mtr1_dir*raw_1) / scale
f_y = (mtr2_dir*raw_2) / scale
f_z = (mtr3_dir*raw_3) / scale
"""
f_x = rbv_1
f_y = rbv_2
f_z = rbv_3
print '\nWriting Fiducial File', 20*('%s ' %point)
print 'MTR\tRBV\tRAW\tDirect.\tf_value'
print 'MTR1\t%1.4f\t%i\t%i\t%1.4f' % (rbv_1, raw_1, mtr1_dir, f_x)
print 'MTR2\t%1.4f\t%i\t%i\t%1.4f' % (rbv_2, raw_2, mtr2_dir, f_y)
print 'MTR3\t%1.4f\t%i\t%i\t%1.4f' % (rbv_3, raw_3, mtr3_dir, f_z)
print 'Writing Fiducial File', 20*('%s ' %point)
f = open(param_path + 'fiducial_%s.txt' %point, 'w')
f.write('MTR\tRBV\tRAW\tCorr\tf_value\n')
f.write('MTR1\t%1.4f\t%i\t%i\t%1.4f\n' % (rbv_1, raw_1, mtr1_dir, f_x))
f.write('MTR2\t%1.4f\t%i\t%i\t%1.4f\n' % (rbv_2, raw_2, mtr2_dir, f_y))
f.write('MTR3\t%1.4f\t%i\t%i\t%1.4f' % (rbv_3, raw_3, mtr3_dir, f_z))
f.close()
print 10*'Done '
def scrape_mtr_fiducials(point):
#param_path = '/dls_sw/i24/scripts/fastchips/parameter_files/'
param_path = '/localhome/local/Documents/sacla/parameter_files/'
f = open(param_path+'fiducial_%i.txt' %point,'r')
f_lines = f.readlines()[1:]
f_x = float(f_lines[0].rsplit()[4])
f_y = float(f_lines[1].rsplit()[4])
f_z = float(f_lines[2].rsplit()[4])
f.close()
return f_x, f_y, f_z
def cs_maker():
chip_type = caget(pv.me14e_gp1)
fiducial_dict = {}
fiducial_dict[0] = [18.975, 21.375]
fiducial_dict[1] = [25.400, 25.400]
fiducial_dict[2] = [24.968, 24.968]
fiducial_dict[3] = [24.600, 24.600]
fiducial_dict[4] = [27.500, 27.500]
fiducial_dict[5] = [17.175, 17.175]
print chip_type, fiducial_dict[chip_type]
mtr1_dir, mtr2_dir, mtr3_dir = scrape_mtr_directions()
f1_x, f1_y, f1_z = scrape_mtr_fiducials(1)
f2_x, f2_y, f2_z = scrape_mtr_fiducials(2)
print 'AAAAAAAAAAAAAAAAABBBBBBBBBBBBBB'
print 'mtr1 direction', mtr1_dir
print 'mtr2 direction', mtr2_dir
print 'mtr3 direction', mtr3_dir
"""
Theory
Rx: rotation about X-axis, pitch
Ry: rotation about Y-axis, yaw
Rz: rotation about Z-axis, roll
The order of rotation is Roll->Yaw->Pitch (Rx*Ry*Rz)
Rx Ry Rz
|1 0 0| | Cy 0 Sy| |Cz -Sz 0| | CyCz -CxSz Sy |
|0 Cx -Sx|*| 0 1 0|*|Sz Cz 0| = | SxSyCz+CxSz -SxSySz+CxCz -SxCy|
|0 Sx Cx| |-Sy 0 Cy| | 0 0 1| |-CxSyCz+SxSz CxSySz+SxCz CxCy|
BELOW iS TEST TEST (CLOCKWISE)
Rx Ry Rz
|1 0 0| | Cy 0 -Sy| |Cz Sz 0| | CyCz CxSz -Sy |
|0 Cx Sx|*| 0 1 0|*|-Sz Cz 0| = | SxSyCz-CxSz SxSySz+CxCz SxCy|
|0 -Sx Cx| | Sy 0 Cy| | 0 0 1| | CxSyCz+SxSz CxSySz-SxCz CxCy|
"""
# Rotation Around Z #
# If stages upsidedown (I24) change sign of Sz
Sz1 = f1_y / fiducial_dict[chip_type][0]
Sz2 = -1 * (f2_x / fiducial_dict[chip_type][1])
Sz = ((Sz1 + Sz2) / 2)
Cz = np.sqrt((1 - Sz**2))
print 'Sz1 , %1.4f, %1.4f' % (Sz1, np.degrees(np.arcsin(Sz1)))
print 'Sz2 , %1.4f, %1.4f' % (Sz2, np.degrees(np.arcsin(Sz2)))
print 'Sz , %1.4f, %1.4f' % (Sz, np.degrees(np.arcsin(Sz)))
print 'Cz , %1.4f, %1.4f\n' % (Cz, np.degrees(np.arccos(Cz)))
# Rotation Around Y #
Sy = f1_z / fiducial_dict[chip_type][0]
Cy = np.sqrt((1 - Sy**2))
print 'Sy , %1.4f, %1.4f' % (Sy, np.degrees(np.arcsin(Sy)))
print 'Cy , %1.4f, %1.4f\n' % (Cy, np.degrees(np.arccos(Cy)))
# Rotation Around X #
# If stages upsidedown (I24) change sign of Sx
Sx = -1* f2_z / fiducial_dict[chip_type][1]
Cx = np.sqrt((1 - Sx**2))
print 'Sx , %1.4f, %1.4f' % (Sx, np.degrees(np.arcsin(Sx)))
print 'Cx , %1.4f, %1.4f\n' % (Cx, np.degrees(np.arccos(Cx)))
# Crucifix 1: In normal orientation (sat on table facing away)
# X=0.0000 , Y=0.0000, Z=0.0001000 (mm/cts for MRES and ERES)
#scalex,scaley,scalez = 10010.0, 10000.0, 10000.0
# Crucifix 1: In beamline position (upside down facing away)
# X=0.000099896 , Y=0.000099983, Z=0.0001000 (mm/cts for MRES and ERES)
scalex, scaley, scalez = 10010.4, 10001.7, 10000.0
# Crucifix 2: In normal orientation (sat on table facing away)
# X=0.0000999 , Y=0.00009996, Z=0.0001000 (mm/cts for MRES and ERES)
#scalex,scaley,scalez = 10010.0, 10004.0, 10000.0
# Temple 1: In normal orientation (sat on table facing away)
# X=0.0000 , Y=0.0000, Z=0.0001000 (mm/cts for MRES and ERES)
#scalex,scaley,scalez = 10008.0, 10002.0, 10000.0
#minus signs added Aug17 in lab 30 preparing for sacla
#added to y1factor x2factor
x1factor = mtr1_dir * scalex * (Cy * Cz)
y1factor = mtr2_dir * scaley * (-1. * Cx * Sz)
z1factor = mtr3_dir * scalez * Sy
x2factor = mtr1_dir * scalex * ((Sx*Sy*Cz) + (Cx*Sz))
y2factor = mtr2_dir * scaley * ((Cx*Cz) - (Sx*Sy*Sz))
z2factor = mtr3_dir * scalez * (-1. * Sx * Cy)
x3factor = mtr1_dir * scalex * ((Sx*Sz) - (Cx*Sy*Cz))
y3factor = mtr2_dir * scaley * ((Cx*Sy*Sz) + (Sx*Cz))
z3factor = mtr3_dir * scalez * (Cx* Cy)
"""
Rx Ry Rz
|1 0 0| | Cy 0 Sy| |Cz -Sz 0| | CyCz -CxSz Sy |
|0 Cx -Sx|*| 0 1 0|*|Sz Cz 0| = | SxSyCz+CxSz -SxSySz+CxCz -SxCy|
|0 Sx Cx| |-Sy 0 Cy| | 0 0 1| |-CxSyCz+SxSz CxSySz+SxCz CxCy|
"""
# skew is the difference between the Sz1 and Sz2 after rotation is taken out.
# this should be measured in situ prior to expriment
# In situ is measure by hand using opposite and adjacent RBV after calibration of
# scale factors
#print 10*'WARNING\n', '\nHave you calculated skew?\n\n', 10*'WARNING\n'
# Crucifix 1 on table
#skew = -0.187
# Crucifix 1 on beamline
#skew = -0.1568
skew = 0.1863
# Crucifix 2
#skew = 0.060
# Temple 1
#skew = 0.02
print 'Skew being used is: %1.4f' %skew
s1 = np.degrees(np.arcsin(Sz1))
s2 = np.degrees(np.arcsin(Sz2))
rot = np.degrees(np.arcsin((Sz1+Sz2) / 2))
calc_skew = ((s1-rot) - (s2-rot))
print 's1:%1.4f s2:%1.4f rot:%1.4f' %(s1, s2, rot)
print 'Calculated rotation from current fiducials is: %1.4f' %rot
print 'Calculated skew from current fiducials is: %1.4f' %calc_skew
#skew = calc_skew
sinD = np.sin((skew/2) * (np.pi/180))
cosD = np.cos((skew/2) * (np.pi/180))
new_x1factor = (x1factor * cosD) + (y1factor * sinD)
new_y1factor = (x1factor * sinD) + (y1factor * cosD)
new_x2factor = (x2factor * cosD) + (y2factor * sinD)
new_y2factor = (x2factor * sinD) + (y2factor * cosD)
cs1 = "#1->%+1.3fX%+1.3fY%+1.3fZ" % (new_x1factor, new_y1factor, z1factor)
cs2 = "#2->%+1.3fX%+1.3fY%+1.3fZ" % (new_x2factor, new_y2factor, z2factor)
cs3 = "#3->%+1.3fX%+1.3fY%+1.3fZ" % (x3factor, y3factor, z3factor)
print '\n'.join([cs1, cs2, cs3])
print 'These should be 1. This is the sum of the squares of the factors divided by their scale'
print np.sqrt(x1factor**2 + y1factor**2 + z1factor**2) / scalex
print np.sqrt(x2factor**2 + y2factor**2 + z2factor**2) / scaley
print np.sqrt(x3factor**2 + y3factor**2 + z3factor**2) / scalez
print 'Long wait, please be patient'
caput(pv.me14e_pmac_str, '!x0y0z0')
sleep(2.5)
caput(pv.me14e_pmac_str, '&2')
caput(pv.me14e_pmac_str, cs1)
caput(pv.me14e_pmac_str, cs2)
caput(pv.me14e_pmac_str, cs3)
caput(pv.me14e_pmac_str, '!x0y0z0')
sleep(0.1)
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
sleep(0.1)
print 5*'chip_type',type(chip_type)
# NEXT THREE LINES COMMENTED OUT FOR CS TESTS 5 JUNE
if str(chip_type) =='1':
caput(pv.me14e_pmac_str, '!x0.4y0.4')
sleep(0.1)
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
print 10*'CSDone '
else:
caput(pv.me14e_pmac_str, '#1hmz#2hmz#3hmz')
print 10*'CSDone '
def cs_reset():
cs1 = "#1->%+10000X%+0Y%+0Z"
cs2 = "#2->%+0X%+10000Y%+0Z"
cs3 = "#3->0X+0Y+10000Z"
print '\n'.join([cs1, cs2, cs3])
caput(pv.me14e_pmac_str, '&2')
sleep(0.5)
caput(pv.me14e_pmac_str, cs1)
sleep(0.5)
caput(pv.me14e_pmac_str, cs2)
sleep(0.5)
caput(pv.me14e_pmac_str, cs3)
print 10*'CSDone '
def main(args):
if args[1] == 'initialise':
initialise()
elif args[1] == 'pvar_test':
chipid = args[2]
pvar_test(chipid)
elif args[1] == 'moveto':
moveto(args[2])
elif args[1] == 'fiducial':
fiducial(args[2])
elif args[1] == 'cs_maker':
cs_maker()
elif args[1] == 'write_parameter_file':
write_parameter_file()
startup.run()
elif args[1] == 'define_current_chip':
chipid = args[2]
define_current_chip(chipid)
elif args[1] == 'load_stock_map':
map_choice = args[2]
load_stock_map(map_choice)
elif args[1] == 'load_lite_map':
load_lite_map()
elif args[1] == 'load_full_map':
load_full_map()
elif args[1] == 'save_screen_map':
save_screen_map()
elif args[1] == 'upload_full':
upload_full()
elif args[1] == 'upload_parameters':
chipid = args[2]
upload_parameters(chipid)
elif args[1] == 'cs_reset':
cs_reset()
else:
print 'Unknown Command'
if __name__ == '__main__':
main(sys.argv)
| 36.952818 | 171 | 0.563043 |
f71968c2bfbb4980fde3dad9d2991f5150aef9eb | 2,841 | py | Python | setup.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | null | null | null | setup.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 6 | 2016-11-01T18:42:34.000Z | 2020-11-16T16:52:14.000Z | setup.py | blazelibs/blazeweb | b120a6a2e38c8b53da2b73443ff242e2d1438053 | [
"BSD-3-Clause"
] | 1 | 2020-01-22T18:20:46.000Z | 2020-01-22T18:20:46.000Z | import os
try:
from setuptools import setup, find_packages
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
# pip install -e .[develop]
develop_requires = [
'WebTest',
'ScriptTest',
'coverage',
'docutils',
'minimock',
'nose',
]
cdir = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(cdir, 'readme.rst')).read()
CHANGELOG = open(os.path.join(cdir, 'changelog.rst')).read()
VERSION = open(os.path.join(cdir, 'blazeweb', 'version.txt')).read().strip()
required_packages = [
'Beaker>=1.5',
'BlazeUtils>0.3.7',
'Blinker>=1.0',
'decorator>=3.0.1',
'FormEncode>=1.2',
'html2text>=2.35',
'jinja2>=2.5',
'markdown2>=1.0.1',
'Paste>=1.7',
'PasteScript>=1.7',
'WebHelpers2',
'Werkzeug>=1.0.0',
]
try:
import json
del json
except ImportError:
required_packages.append('simplejson>=2.1.1')
setup(
name="BlazeWeb",
version=VERSION,
description="A light weight WSGI framework with a pluggable architecture",
long_description='\n\n'.join((README, CHANGELOG)),
author="Randy Syring",
author_email="randy.syring@level12.io",
url='http://pypi.python.org/pypi/BlazeWeb/',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Internet :: WWW/HTTP'
],
license='BSD',
packages=find_packages(exclude=['tests']),
include_package_data=True,
install_requires=required_packages,
extras_require={'develop': develop_requires},
entry_points="""
[console_scripts]
bw = blazeweb.scripting:blazeweb_entry
[blazeweb.no_app_command]
help=paste.script.help:HelpCommand
project = blazeweb.commands:ProjectCommand
jinja-convert = blazeweb.commands:JinjaConvertCommand
[blazeweb.app_command]
serve = blazeweb.commands:ServeCommand
help = paste.script.help:HelpCommand
testrun = blazeweb.commands:TestRunCommand
tasks = blazeweb.commands:TasksCommand
shell = blazeweb.commands:ShellCommand
routes = blazeweb.commands:RoutesCommand
static-copy = blazeweb.commands:StaticCopyCommand
component-map = blazeweb.commands:ComponentMapCommand
[blazeweb.blazeweb_project_template]
minimal = blazeweb.paster_tpl:MinimalProjectTemplate
bwproject = blazeweb.paster_tpl:ProjectTemplate
[nose.plugins]
blazeweb_initapp = blazeweb.nose_plugin:InitAppPlugin
[pytest11]
blazeweb_initapp = blazeweb.pytest_plugin
""",
zip_safe=False
)
| 28.128713 | 78 | 0.67793 |
f719a60077cb4b23bbe3c54efafc1d30bc3f8163 | 3,252 | py | Python | config.py | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c | [
"MIT"
] | 1 | 2019-03-13T16:05:11.000Z | 2019-03-13T16:05:11.000Z | config.py | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c | [
"MIT"
] | null | null | null | config.py | LongKt7/Face_Recognize_Pytorch | baa02e633d379abe1001c8b8acb942617177329c | [
"MIT"
] | 1 | 2019-03-15T09:09:08.000Z | 2019-03-15T09:09:08.000Z | from easydict import EasyDict as edict
# from pathlib import Path
import torch
import os
from torchvision import transforms as trans
from utils.constants import *
list_model = ['wget https://www.dropbox.com/s/akktsgxp0n8cwn2/model_mobilefacenet.pth?dl=0 -O model_mobilefacenet.pth',
'wget https://www.dropbox.com/s/kzo52d9neybjxsb/model_ir_se50.pth?dl=0 -O model_ir_se50.pth',
'wget https://www.dropbox.com/s/rxavczg9dlxy3a8/model_ir50.pth?dl=0 -O model_ir50.pth']
def get_config(mode = 'app', net_size = 'large', net_mode = 'ir_se', use_mtcnn = 1, threshold = 1.25):
conf = edict()
conf.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
conf.input_size = [112, 112]
conf.face_limit = 5
conf.min_face_size = 30
conf.mode = mode
conf.net_size = net_size
if mode =='app':
assert net_size in ['mobi', 'large', None], 'net_size should be mobi or large, please change in cogfig.py'
conf.use_tensor = True
conf.work_path = WORK_PATH
conf.model_path = '%s/models'%WORK_PATH
conf.log_path = '%s/log'%WORK_PATH
conf.save_path = '%s/save'%WORK_PATH
conf.facebank_path = '%s/Face_bank'%WORK_PATH
conf.threshold = threshold
if use_mtcnn:
conf.use_mtcnn = True
else:
conf.use_mtcnn = False
#when inference, at maximum detect 10 faces in one image, my laptop is slow
conf.test_transform = trans.Compose([
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
if net_size == 'large':
conf.use_mobilfacenet = False
if net_mode == 'ir_se':
conf.net_mode = 'ir_se' # or 'ir'
conf.weight_path = '%s/weights/model_ir_se50.pth'%WORK_PATH
conf.url = list_model[1]
else:
conf.net_mode = 'ir' # or 'ir'
conf.weight_path = '%s/weights/model_ir50.pth'%WORK_PATH
conf.url = list_model[2]
if net_size =='mobi':
conf.use_mobilfacenet = True
conf.weight_path = '%s/weights/model_mobilefacenet.pth'%WORK_PATH
conf.url = list_model[0]
conf.video_source = 0
if mode =='training_eval':
conf.lr = 1e-3
conf.milestones = [18,30,42]
conf.momentum = 0.9
conf.pin_memory = True
# conf.num_workers = 4 # when batchsize is 200
conf.num_workers = 3
conf.train_root = "/mnt/01D4A1D481139570/Dataset/Face/casia"
conf.file_list = '/mnt/01D4A1D481139570/Dataset/Face/casia_train.txt'
conf.batch_size = 4
conf.lfw_root = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/lfw_align_112'
conf.lfw_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/LFW/pairs.txt'
conf.agedb_root = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb30_align_112'
conf.agedb_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/AgeDB-30/agedb_30_pair.txt'
conf.cfp_root = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/CFP_FP_aligned_112'
conf.cfp_file_list = '/mnt/01D4A1D481139570/Dataset/Face/data/CFP-FP/cfp_fp_pair.txt'
return conf | 47.823529 | 119 | 0.634071 |
f719a616152547d0300a25992cdb6dbefb41b0a6 | 16,599 | py | Python | utils/tests/test_util.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | utils/tests/test_util.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | utils/tests/test_util.py | Splendon/examples | ed4a8a01857b6ddca49559141acf5d0986eb01e1 | [
"MIT"
] | null | null | null | # Copyright 2019 Graphcore Ltd.
"""Library of utility functions common between frameworks"""
from statistics import mean
import numpy as np
import os
import re
import subprocess
import sys
import time
def parse_results_for_speed(output, iter_tolerance, speed_tolerance):
"""Look for <iter number> sec/itr. <speed number> {other stuff}"""
found_a_result = False
for line in output.split("\n"):
matches = re.match(r"([\d.]+) +sec/itr. +([\d.]+)", line)
if matches:
found_a_result = True
iterations, speed = matches.groups()
iterations = float(iterations)
speed = float(speed)
_verify_model_numbers(
iter_tolerance, iterations, speed_tolerance, speed, line
)
if not found_a_result:
raise AssertionError("No results detected in this run")
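# Illustrative usage sketch (comment only, values made up): the parser above
# expects timing lines in the captured output of the form
#     "0.55  sec/itr.  1527.3"
# and a typical call checks both bounds at once, e.g.
#     parse_results_for_speed(out, (0.3, 0.8), (1000.0, 2000.0))
# where each tolerance argument is a (minimum, maximum) pair.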
def parse_results_for_accuracy(output, expected_accuracies, acc_tolerance):
"""Look for Accuracy=<accuracy>%"""
accuracies = []
for line in output.split("\n"):
        match = re.match(r" + Accuracy=+([\d.]+)%", line)
        if match:
            accuracy = float(match.groups()[0])
            accuracies.append(accuracy)
elif re.search(r"Validation accuracy", line):
accuracy_str = re.search(r"accuracy:\s(.*)", line).group(1)
accuracy = float(accuracy_str[:accuracy_str.rfind("%")])
accuracies.append(accuracy)
if len(accuracies) == 0:
raise AssertionError("No results detected in this run")
elif len(accuracies) != len(expected_accuracies):
raise AssertionError("Expected accuracies and parsed accuracies have"
" different lengths")
_verify_model_accuracies(accuracies, expected_accuracies, acc_tolerance)
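# Illustrative usage sketch (comment only, values made up): accuracies are
# scraped from lines such as "   Accuracy=93.2%" or
# "Validation accuracy: 93.2%", so a run reporting two accuracies could be
# checked with, e.g.
#     parse_results_for_accuracy(out, [85.0, 93.0], 1.0)
# which requires one expected accuracy per accuracy reported in the output.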
def _verify_model_numbers(iter_tolerance, iterations,
speed_tolerance, speed, line):
iter_error = ""
speed_error = ""
# Verify iteration speed
if iterations > iter_tolerance[1]:
iter_error = ("The time per iteration has regressed above"
" the tolerance maximum: " +
str(iter_tolerance[1]))
elif iterations < iter_tolerance[0]:
        iter_error = ("Time taken to complete an iteration was "
"suspiciously fast. Please verify the model"
" is operating correctly and tune tolerances"
" accordingly.")
# Verify item processing speed
if speed < speed_tolerance[0]:
speed_error = ("The number of items processed per second"
" has regressed below the tolerance: " +
str(speed_tolerance[0]))
elif speed > speed_tolerance[1]:
speed_error = ("The number of items processed per second"
" was suspiciously high. Please verify the"
" model is behaving correctly and tune"
" tolerances accordingly.")
if iter_error and speed_error:
sys.stderr.write("\n".join([line, iter_error, speed_error]))
raise AssertionError("Timings out of tolerance range")
elif iter_error or speed_error:
sys.stderr.write(line)
raise AssertionError(iter_error + speed_error)
def _verify_model_accuracies(accuracies, expected_accuracy, acc_tolerance):
"""Asserts a list of accuracies is within a list of expected accuracies
with a tolerance applied.
Args:
accuracies: A list of floats representing the accuracies (%) produced
by the model at each step.
expected_accuracy: A list of floats representing the expected
accuracies (%) produced by the model at each step.
acc_tolerance: A float representing a percentage tolerance applied on
top of the expected accuracies that the accuracies produced by
the model should sit within.
Raises:
Assertion Error: Accuracy produced by the model are not within
the expected limits.
"""
for iter_num in range(len(accuracies)):
exp_acc = expected_accuracy[iter_num]
exp_acc_str = (
"{0} = {1} +- {2} = [{3:.{5}f}, {4:.{5}f}]".format(
"Expected accuracy (%)".ljust(22),
exp_acc,
acc_tolerance,
exp_acc - acc_tolerance,
exp_acc + acc_tolerance,
2
)
)
acc = accuracies[iter_num]
acc_str = "{} = {:.{}f}".format(
"Accuracy (%)".ljust(22),
acc,
2
)
full_acc_str = "{}\n{}".format(acc_str, exp_acc_str)
if acc < exp_acc - acc_tolerance:
raise AssertionError(
"After iteration {}, the model is less accurate"
" than expected.\n"
"{}".format(iter_num + 1, full_acc_str)
)
elif acc > exp_acc + acc_tolerance:
raise AssertionError(
"After iteration {}, the model is producing an accuracy"
" that is suspiciously high and should be reviewed.\n"
"{}".format(iter_num + 1, full_acc_str)
)
def assert_result_equals_tensor_value(output, tensor):
"""Searches for a single tensor result in the first line of the output
Searches the first line of the string output for a line with format
    '[array([3., 8.], dtype=float32)]' and asserts it is equal to the numpy
tensor argument
Args:
output: String containing the string representation of a numpy
tensor
tensor: numpy tensor representing the expected result
Returns:
None
Raises:
Assertion Error: Output is not in correct format
Assertion Error: Output does not contain a string representation
of a numpy array
Assertion Error: Output numpy array does not equal the expected
numpy array
"""
# TODO - np representation over multiple lines
# TODO - large np array output
# TODO - multiple dimension np output
list_regex = r"^\[.*?\]$"
np_array_str_regex = r"array\(.*?, dtype=.*?\)$"
first_line = output.split("\n")[0]
if not re.match(list_regex, first_line):
raise AssertionError(
"Result not in expected string format."
" Expecting stringified list "
" eg. [array([3., 8.], dtype=float32)]"
)
contents = first_line[1:-1]
if not re.match(np_array_str_regex, contents):
raise AssertionError(
"Expecting numpy representation "
"array with dtype "
"eg. array([3., 8.], dtype=float32)"
)
    assert contents == np.array_repr(tensor), (
        "Output value {} does not "
        "equal expected value {}".format(contents, np.array_repr(tensor))
    )
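# Illustrative usage sketch (comment only): for a script whose first output
# line is "[array([3., 8.], dtype=float32)]", the check above would be
# exercised as, e.g.
#     assert_result_equals_tensor_value(
#         out, np.array([3.0, 8.0], dtype=np.float32))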
def parse_results_for_ipus_used(output):
"""Finds the number of IPUs used in the model by looking for
string with format ' On 2 IPUs.' in output"""
shards_regex = r" On ([\d.]+) IPUs."
for line in output.split("\n"):
matches = re.match(shards_regex, line)
if matches:
shards = matches.group(1)
return int(shards)
raise AssertionError("Expecting line detailing IPU usage "
"eg. ' On 2 IPUs.'")
def assert_shards(output, expected_shards):
"""Verify the expected number of shards used were actually
used"""
actual_shards = parse_results_for_ipus_used(output)
assert actual_shards == expected_shards
def get_final_accuracy(output):
"""Find and return the accuracy reported in a test's output."""
result_regex = r"Accuracy=([\d.]+)\%"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_final_loss(output):
"""Find and return the loss reported in a test's output."""
result_regex = r"Loss=([\d.]+)"
result_list = parse_results_with_regex(output, result_regex)
result = result_list[0]
return result[-1]
def get_average_speeds(output):
"""Finds the average seconds/iteration and tokens/second
Args:
output: String representing the output of a test.
Returns:
A tuple where the first element is a float representing
        the average seconds per iteration and the second the
average tokens processed per second
"""
result_regex = r"([\d.]+) +sec/itr. +([\d.]+)"
results = parse_results_with_regex(output, result_regex)
itr_sec_list = results[0]
tokens_sec_list = results[1]
return mean(itr_sec_list), mean(tokens_sec_list)
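# Illustrative numeric example (comment only, values made up): for output
# containing the timing lines "0.50  sec/itr.  1500.0" and
# "0.54  sec/itr.  1480.0", get_average_speeds(out) would return
# approximately (0.52, 1490.0).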
def parse_results_with_regex(output, regex):
    """Finds and returns the regex matching results in output
    Looks through the output line by line looking for a matching regex.
    The function assembles a list of lists, with one child list per capture
    group in the regex; each child list holds that group's results in the
    order they were found in the output
Args:
output: String representing the output of a test.
regex: Regex of result to find.
Returns:
A list of lists of floats. Parent list represents the result at each
position in the regex. Child list contains results received in the
order they were output.
Raises:
AssertionError: a line matching the regex could not be found in the
output
"""
results = []
for line in output.split("\n"):
matches = re.search(regex, line)
if matches:
number_of_results = matches.lastindex
if results == []:
results = [None] * number_of_results
for match_index in range(0, number_of_results):
result = float(matches.group(match_index + 1))
if results[match_index]:
results[match_index].append(result)
continue
results[match_index] = [result]
if results == []:
raise AssertionError("Regex {} not found in result".format(regex))
return results
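# Illustrative numeric example (comment only, values made up): for output
# containing the lines "0.50  sec/itr.  1500.0" and "0.54  sec/itr.  1480.0",
#     parse_results_with_regex(out, r"([\d.]+) +sec/itr. +([\d.]+)")
# would return [[0.5, 0.54], [1500.0, 1480.0]] - one inner list per capture
# group, each in the order the matches appeared.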
def get_total_epochs(output):
    """Finds the number of epochs the model has run through by looking for
string with format 'Epoch #3' in the models raw output"""
epochs = None
for line in output.split("\n"):
epoch_match = re.search(r"Epoch #([\d.]+)", line)
if epoch_match:
epochs = int(epoch_match.group(1))
    if epochs is None:
raise AssertionError("Epochs not found in output, eg. "
"Epoch #3")
return epochs
def assert_total_run_time(total_time, time_range):
"""Checks total run time is within the required range
Args:
total_time: float representing number of seconds the test took to
run
time_range: a tuple of floats where the first element is the minimum
time the test should run in in seconds and the second the
maximum
Raises:
AssertionError: if the total_time is not between the minimum time
and maximum time
"""
minimum_time = time_range[0]
maximum_time = time_range[1]
assert total_time >= minimum_time
assert total_time <= maximum_time
def assert_final_accuracy(output, minimum, maximum):
"""Gets the final accuracy given a raw model output and checks its value
is between the minimum and maximum
Args:
output: String representing the raw output of a model
minimum: a float representing a percentage (between 0.0% and 100%)
that is the minimum accuracy for the model after running
maximum: a float representing a percentage (between 0.0% and 100%)
that is the maximum accuracy for the model after running
Raises:
AssertionError: if the final accuracy is not between the maximum and
minimum percentages
"""
accuracy = get_final_accuracy(output)
assert accuracy >= minimum
assert accuracy <= maximum
def run_python_script_helper(cwd, script, **kwargs):
"""A function that given a path and python script name, runs the script
with kwargs as the command line arguments
Args:
cwd: string representing the directory of the python script
script: string representing the full name of the python script
kwargs: dictionary of string key and values that form the command
line arguments when the script is run.
Returns:
A string representing the raw output of the python script run
Raises:
subprocess.CalledProcessError: if the script exits with a non-zero
return code
"""
py_version = "python{}".format(sys.version_info[0])
cmd = [py_version, script]
if kwargs:
args = [
str(item) for sublist in kwargs.items() for item in sublist if item != ""
]
cmd.extend(args)
out = subprocess.check_output(cmd, cwd=cwd, universal_newlines=True)
print(out)
return out
def run_test_helper(subprocess_function, total_run_time=None,
total_run_time_tolerance=0.1, **kwargs):
"""Helper function for running tests
Takes in testable parameters, runs the test and checks the relevant
parameters against test results
Args:
subprocess_function: the function that runs a subprocess of
the model in question
total_run_time: float representing the expected total time, in
seconds, taken to run the test (skipped when None)
total_run_time_tolerance: float between 0.0 and 1.0 applied
symmetrically around total_run_time to derive the accepted range
Returns:
A String representing the raw output of the models subprocess
Raises:
AssertionError: If the accuracy, time taken etc. are not within
the expected bounds
"""
start_time = time.time()
out = subprocess_function(**kwargs)
total_time = time.time() - start_time
if total_run_time:
total_run_time_range = range_from_tolerances(
total_run_time, total_run_time_tolerance
)
assert_total_run_time(total_time, total_run_time_range)
return out
def range_from_tolerances(value, tolerance):
"""Helper function that takes a value and applies the tolerance
Args:
value: a float representing the mean value to which the tolerance
will be applied
tolerance: a float representing a percentage (between 0.0 and 1.0)
which is applied symmetrically across the value argument
Returns:
A tuple of floats, the first element representing the tolerance
applied below the value (minimum) and the second above (maximum)
"""
return (
get_minimum_with_tolerance(value, tolerance),
get_maximum_with_tolerance(value, tolerance),
)
def get_minimum_with_tolerance(value, tolerance):
"""Helper function that takes a value and applies the tolerance
below the value
Args:
value: a float representing the mean value to which the tolerance
will be applied
tolerance: a float representing a percentage (between 0.0 and 1.0)
which is applied to the value argument
Returns:
A float representing the tolerance applied below the value (minimum)
"""
return value * (1 - tolerance)
def get_maximum_with_tolerance(value, tolerance):
"""Helper function that takes a value and applies the tolerance
above the value
Args:
value: a float representing the mean value to which the tolerance
will be applied
tolerance: a float representing a percentage (between 0.0 and 1.0)
which is applied to the value argument
Returns:
A float representing the tolerance applied above the value (maximum)
"""
return value * (1 + tolerance)
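# Worked example (added; the numbers are arbitrary): a 25% tolerance around
# 60.0 seconds gives the bounds later passed to assert_total_run_time().
def _example_range_from_tolerances():
    minimum, maximum = range_from_tolerances(60.0, 0.25)
    assert (minimum, maximum) == (45.0, 75.0)  # 60*(1-0.25), 60*(1+0.25)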
def check_data_exists(data_path, expected_files_list):
"""Helper function that checks the expected data exists in a directory
Args:
data_path: A string representing the directory of where the
data is expected to be
expected_files_list: a list of strings representing the expected
file names in the data_path directory
Returns:
A boolean which represents whether the expected files are found in
the data_path directory
"""
if os.path.exists(data_path):
for filename in expected_files_list:
if not os.path.isfile(os.path.join(data_path, filename)):
return False
return True
return False
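# End-to-end sketch (added; the directory, script name and expected values are
# hypothetical): time a training script via run_python_script_helper() and
# allow +/- 10% around the expected wall-clock time.
def _example_full_test_run():
    output = run_test_helper(
        run_python_script_helper,
        total_run_time=120,            # expected seconds (assumed)
        total_run_time_tolerance=0.1,  # accepts roughly 108-132 s
        cwd="examples/demo",           # hypothetical working directory
        script="train.py",             # hypothetical script name
    )
    # assumes the script prints a "Loss=<value>" line in its output
    return get_final_loss(output)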
| 34.36646 | 85 | 0.636123 |
f719bd0e61d8fc8ee4756b2db46ad0dfa8dfa39d | 6,499 | py | Python | twisted/test/test_text.py | sxamit/twisted | 30f6966329c857c3631c60aeb420d84d7828e01e | [
"MIT",
"Unlicense"
] | 1 | 2017-08-07T14:52:02.000Z | 2017-08-07T14:52:02.000Z | Lib/site-packages/twisted/test/test_text.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/twisted/test/test_text.py | adzhou/Python27 | a7113b69d54a04cc780143241c2f1fe81939ad3a | [
"bzip2-1.0.6"
] | 1 | 2018-11-07T12:52:07.000Z | 2018-11-07T12:52:07.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.text}.
"""
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python import text
sampleText = \
"""Every attempt to employ mathematical methods in the study of chemical
questions must be considered profoundly irrational and contrary to the
spirit of chemistry ... If mathematical analysis should ever hold a
prominent place in chemistry - an aberration which is happily almost
impossible - it would occasion a rapid and widespread degeneration of that
science.
-- Auguste Comte, Philosophie Positive, Paris, 1838
"""
class WrapTests(unittest.TestCase):
"""
Tests for L{text.greedyWrap}.
"""
def setUp(self):
self.lineWidth = 72
self.sampleSplitText = sampleText.split()
self.output = text.wordWrap(sampleText, self.lineWidth)
def test_wordCount(self):
"""
Compare the number of words.
"""
words = []
for line in self.output:
words.extend(line.split())
wordCount = len(words)
sampleTextWordCount = len(self.sampleSplitText)
self.assertEqual(wordCount, sampleTextWordCount)
def test_wordMatch(self):
"""
Compare the lists of words.
"""
words = []
for line in self.output:
words.extend(line.split())
# Using assertEqual here prints out some
# rather too long lists.
self.assertTrue(self.sampleSplitText == words)
def test_lineLength(self):
"""
Check the length of the lines.
"""
failures = []
for line in self.output:
if not len(line) <= self.lineWidth:
failures.append(len(line))
if failures:
self.fail("%d of %d lines were too long.\n"
"%d < %s" % (len(failures), len(self.output),
self.lineWidth, failures))
def test_doubleNewline(self):
"""
Allow paragraphs delimited by two \ns.
"""
sampleText = "et\n\nphone\nhome."
result = text.wordWrap(sampleText, self.lineWidth)
self.assertEqual(result, ["et", "", "phone home.", ""])
class LineTests(unittest.TestCase):
"""
Tests for L{isMultiline} and L{endsInNewline}.
"""
def test_isMultiline(self):
"""
L{text.isMultiline} returns C{True} if the string has a newline in it.
"""
s = 'This code\n "breaks."'
m = text.isMultiline(s)
self.assertTrue(m)
s = 'This code does not "break."'
m = text.isMultiline(s)
self.assertFalse(m)
def test_endsInNewline(self):
"""
L{text.endsInNewline} returns C{True} if the string ends in a newline.
"""
s = 'newline\n'
m = text.endsInNewline(s)
self.assertTrue(m)
s = 'oldline'
m = text.endsInNewline(s)
self.assertFalse(m)
class StringyStringTests(unittest.TestCase):
"""
Tests for L{text.stringyString}.
"""
def test_tuple(self):
"""
Tuple elements are displayed on separate lines.
"""
s = ('a', 'b')
m = text.stringyString(s)
self.assertEqual(m, '(a,\n b,)\n')
def test_dict(self):
"""
Dicts elements are displayed using C{str()}.
"""
s = {'a': 0}
m = text.stringyString(s)
self.assertEqual(m, '{a: 0}')
def test_list(self):
"""
List elements are displayed on separate lines using C{str()}.
"""
s = ['a', 'b']
m = text.stringyString(s)
self.assertEqual(m, '[a,\n b,]\n')
class SplitTests(unittest.TestCase):
"""
Tests for L{text.splitQuoted}.
"""
def test_oneWord(self):
"""
Splitting strings with one-word phrases.
"""
s = 'This code "works."'
r = text.splitQuoted(s)
self.assertEqual(['This', 'code', 'works.'], r)
def test_multiWord(self):
s = 'The "hairy monkey" likes pie.'
r = text.splitQuoted(s)
self.assertEqual(['The', 'hairy monkey', 'likes', 'pie.'], r)
# Some of the many tests that would fail:
#def test_preserveWhitespace(self):
# phrase = '"MANY SPACES"'
# s = 'With %s between.' % (phrase,)
# r = text.splitQuoted(s)
# self.assertEqual(['With', phrase, 'between.'], r)
#def test_escapedSpace(self):
# s = r"One\ Phrase"
# r = text.splitQuoted(s)
# self.assertEqual(["One Phrase"], r)
class StrFileTests(unittest.TestCase):
def setUp(self):
self.io = StringIO("this is a test string")
def tearDown(self):
pass
def test_1_f(self):
self.assertEqual(False, text.strFile("x", self.io))
def test_1_1(self):
self.assertEqual(True, text.strFile("t", self.io))
def test_1_2(self):
self.assertEqual(True, text.strFile("h", self.io))
def test_1_3(self):
self.assertEqual(True, text.strFile("i", self.io))
def test_1_4(self):
self.assertEqual(True, text.strFile("s", self.io))
def test_1_5(self):
self.assertEqual(True, text.strFile("n", self.io))
def test_1_6(self):
self.assertEqual(True, text.strFile("g", self.io))
def test_3_1(self):
self.assertEqual(True, text.strFile("thi", self.io))
def test_3_2(self):
self.assertEqual(True, text.strFile("his", self.io))
def test_3_3(self):
self.assertEqual(True, text.strFile("is ", self.io))
def test_3_4(self):
self.assertEqual(True, text.strFile("ing", self.io))
def test_3_f(self):
self.assertEqual(False, text.strFile("bla", self.io))
def test_large_1(self):
self.assertEqual(True, text.strFile("this is a test", self.io))
def test_large_2(self):
self.assertEqual(True, text.strFile("is a test string", self.io))
def test_large_f(self):
self.assertEqual(False, text.strFile("ds jhfsa k fdas", self.io))
def test_overlarge_f(self):
self.assertEqual(False, text.strFile("djhsakj dhsa fkhsa s,mdbnfsauiw bndasdf hreew", self.io))
def test_self(self):
self.assertEqual(True, text.strFile("this is a test string", self.io))
def test_insensitive(self):
self.assertEqual(True, text.strFile("ThIs is A test STRING", self.io, False))
| 26.744856 | 103 | 0.59086 |
f719f0bd0810de624991f194db2d5e2731bca1d7 | 2,976 | py | Python | etcd/setup.py | dvanderveer/integrations-core | 41dd9950296455457c9b7342584153678503d5aa | [
"BSD-3-Clause"
] | null | null | null | etcd/setup.py | dvanderveer/integrations-core | 41dd9950296455457c9b7342584153678503d5aa | [
"BSD-3-Clause"
] | null | null | null | etcd/setup.py | dvanderveer/integrations-core | 41dd9950296455457c9b7342584153678503d5aa | [
"BSD-3-Clause"
] | null | null | null | # Always prefer setuptools over distutils
from setuptools import setup
# To use a consistent encoding
from codecs import open
from os import path
import re
here = path.abspath(path.dirname(__file__))
# get the long description from the readme file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
runtime_reqs = ['datadog_checks_base']
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
for line in f.readlines():
line = line.strip()
if not line or line.startswith('--hash') or line[0] == '#':
continue
req = line.rpartition('#')
if not len(req[1]):
if '--hash=' in req[2]:
tokens = req[2].split()
if len(tokens) > 1:
runtime_reqs.append(tokens[0])
elif ';' in req[2]:
runtime_reqs.append(req[2])
else:
runtime_reqs.append(req[0])
def read(*parts):
with open(path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
# https://packaging.python.org/guides/single-sourcing-package-version/
version = find_version("datadog_checks", "etcd", "__init__.py")
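# Illustration (added; the version value is hypothetical): find_version() above
# scans datadog_checks/etcd/__init__.py for a line such as
#     __version__ = '1.2.0'
# and the regex returns the quoted value, so version would be '1.2.0'.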
setup(
name='datadog-etcd',
version=version,
description='The Etcd check',
long_description=long_description,
keywords='datadog agent etcd check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
# The package we're going to ship
packages=['datadog_checks.etcd'],
# Run-time dependencies
install_requires=list(set(runtime_reqs)),
# Development dependencies, run with:
# $ pip install -e .[dev]
extras_require={
'dev': [
'check-manifest',
'datadog_agent_tk>=5.15',
],
},
# Testing setup and dependencies
tests_require=[
'nose',
'coverage',
'datadog_agent_tk>=5.15',
],
test_suite='nose.collector',
# Extra files to ship with the wheel package
package_data={b'datadog_checks.etcd': ['conf.yaml.example']},
include_package_data=True,
)
| 29.176471 | 70 | 0.612231 |
f719f96e68fd7b17d73ed6b9460ebade8987ebf6 | 4,908 | py | Python | parseepo/serialize.py | cverluise/parseEPO | be1171a0f8e6fcafa711fa291aebb1fc2260d5e6 | [
"MIT"
] | null | null | null | parseepo/serialize.py | cverluise/parseEPO | be1171a0f8e6fcafa711fa291aebb1fc2260d5e6 | [
"MIT"
] | 3 | 2021-02-02T22:38:50.000Z | 2021-08-23T20:41:10.000Z | parseepo/serialize.py | cverluise/parseEPO | be1171a0f8e6fcafa711fa291aebb1fc2260d5e6 | [
"MIT"
] | null | null | null | import html2text
import pandas as pd
from wasabi import Printer
from parseepo import validate
from parseepo.exception import SingleAttrException
from parseepo.utils import prepare_name
h = html2text.HTML2Text()
msg = Printer()
NAMES = ["EP", "Num", "Ext", "publication_date", "language", "attr", "text"]
NESTED_ATTR = ["TITLE", "CLAIM", "AMEND", "title", "claims", "amendment"]
def format_patent_df(
data: list, prepare_names: bool = False, handle_html: bool = False
):
"""
Return data as a prepared DataFrame from a list of rows
Nb: Input is [publication_number[Row]].
E.g. [['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],
['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],
...
:param data: List[List]
:param prepare_names: bool, True if you want to prepare names for BQ compatibility
:param handle_html: bool, True if you want to handle html
:return: pd.DataFrame
publication_date language attr text publication_number
0 1996-03-06 ... ... ... EP-0700059-A1
1 1996-03-06 ... ... ... EP-0700059-A1
2 1996-03-06 ... ... ... EP-0700059-A1
3 1996-03-06 ... ... ... EP-0700059-A1
4 1996-03-06 ... ... ... EP-0700059-A1
5 1996-03-06 ... ... ... EP-0700059-A1
6 1996-03-06 ... ... ... EP-0700059-A1
"""
df_ = pd.DataFrame(data, columns=NAMES)
df_["publication_number"] = df_["EP"] + "-" + df_["Num"] + "-" + df_["Ext"]
df_ = df_.drop(["EP", "Num", "Ext"], axis=1)
if prepare_names:
df_["attr"] = df_["attr"].apply(lambda x: prepare_name(x, True))
if handle_html:
df_["text"] = df_["text"].apply(lambda x: h.handle(x))
return df_
def unnest_attr(patent_dict: dict, publication_number: str):
"""
Unnest flat attributes returned as nested by the batch aggregation operation in
serialize_patent.
Raises warning if expected flat attributes has multiple values.
:param patent_dict: dict, returned by serialize_patent
:param publication_number: str, e.g. 'EP-0600083-A1'
:return: dict
In:
{ ...,
'PDFEP': {'language': ['en'],
'text': ['https://data.epo.org/publication-server/...']},
}
Out:
{...,
'PDFEP': 'https://data.epo.org/publication-server/...',}
"""
attrs = list(filter(lambda x: x not in NESTED_ATTR, patent_dict.keys()))
for attr in attrs:
val = patent_dict[attr]["text"]
try:
validate.single_attr(val, attr, publication_number)
except SingleAttrException:
msg.warn(
f"{publication_number}: {attr} has more than 1 value. Only the first value "
f"was kept. Add {attr} to the list NESTED_ATTR to fix this behavior."
)
patent_dict.update(
{
attr: {
"text": patent_dict[attr]["text"][0],
"language": patent_dict[attr]["language"][0],
}
}
)
def serialize_patent_df(patent_df: pd.DataFrame):
"""
Return the serialized patent
:param patent_df: pd.DataFrame, returned by format_patent_df
:return: dict
{'ABSTR': '<p id="pa01" num="0001">A device ...',
'CLAIM': {'language': ['en'],
'text': ['<claim id="c-en-0001" ...']},
'DESCR': '<heading id="h0001">Field of ...',
'PDFEP': 'https://data.epo.org/publication-server/...',
'TITLE': {'language': ['de', 'en', 'fr'],
'text': ['VORRICHTUNG ZUM ...',
'DEVICE FOR CONVEYING ...',
"DISPOSITIF D'ACHEMINEMENT ...']},
'publication_date': '1994-06-08',
'publication_number': 'EP-0600083-A1'}
"""
publication_number = patent_df["publication_number"].values[0]
publication_date = patent_df["publication_date"].values[0]
out = (
patent_df.drop(["publication_number", "publication_date"], axis=1)
.groupby("attr")
.aggregate(list)
.T.to_dict()
)
unnest_attr(out, publication_number)
out.update({"publication_number": publication_number})
out.update({"publication_date": publication_date})
return out
def serialize_patent(
data: list, prepare_names: bool = False, handle_html: bool = False
):
"""
Return the serialized patent
:param data: List[List[str]], E.g.
[['EP','0700059 A1','1996-03-06','de','TITLE',' Elektroma...'],
['EP','0700059 A1','1996-03-06','en','TITLE',' Electroma...'],
:param prepare_names: bool, True if you want to prepare names for BQ compatibility
:param handle_html: bool, True if you want to handle html
:return: dict
"""
out = format_patent_df(data, prepare_names, handle_html)
out = serialize_patent_df(out)
return out
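# Usage sketch (added; abbreviated, made-up field values following
# NAMES = [EP, Num, Ext, publication_date, language, attr, text]):
# rows = [["EP", "0700059", "A1", "1996-03-06", "de", "TITLE", "Elektroma..."],
#         ["EP", "0700059", "A1", "1996-03-06", "en", "TITLE", "Electroma..."]]
# serialize_patent(rows)["publication_number"]  ->  "EP-0700059-A1"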
| 36.355556 | 92 | 0.581296 |
f71a0da9d68a3d4c9024e6fcb718688385715211 | 83 | py | Python | buttonlist/src/buttonlist/__main__.py | pmfrank/beeware-tutorials | 96274b0a735bd468e946111baf441a527ff0b0d5 | [
"BSD-2-Clause"
] | 1 | 2021-06-04T05:51:39.000Z | 2021-06-04T05:51:39.000Z | buttonlist/src/buttonlist/__main__.py | pmfrank/beeware-tutorials | 96274b0a735bd468e946111baf441a527ff0b0d5 | [
"BSD-2-Clause"
] | null | null | null | buttonlist/src/buttonlist/__main__.py | pmfrank/beeware-tutorials | 96274b0a735bd468e946111baf441a527ff0b0d5 | [
"BSD-2-Clause"
] | null | null | null | from buttonlist.app import main
if __name__ == '__main__':
main().main_loop()
| 16.6 | 31 | 0.698795 |
f71a1e9ab3b466d5a052c9eb0a36e082154d5dbc | 1,747 | py | Python | igibson/robots/jr2_robot.py | suresh-guttikonda/iGibson | a69e623058180146466cd52d4bb3c00d1facdacf | [
"MIT"
] | 360 | 2020-04-02T11:12:09.000Z | 2022-03-24T21:46:58.000Z | igibson/robots/jr2_robot.py | suresh-guttikonda/iGibson | a69e623058180146466cd52d4bb3c00d1facdacf | [
"MIT"
] | 169 | 2020-04-07T21:01:05.000Z | 2022-03-31T10:07:39.000Z | igibson/robots/jr2_robot.py | suresh-guttikonda/iGibson | a69e623058180146466cd52d4bb3c00d1facdacf | [
"MIT"
] | 94 | 2020-04-09T23:22:17.000Z | 2022-03-17T21:49:03.000Z | import gym
import numpy as np
from igibson.robots.robot_locomotor import LocomotorRobot
class JR2(LocomotorRobot):
"""
JR2 robot (no arm)
Reference: https://cvgl.stanford.edu/projects/jackrabbot/
Uses joint velocity control
"""
def __init__(self, config):
self.config = config
self.velocity = config.get("velocity", 1.0)
LocomotorRobot.__init__(
self,
"jr2_urdf/jr2.urdf",
action_dim=4,
scale=config.get("robot_scale", 1.0),
is_discrete=config.get("is_discrete", True),
control="velocity",
)
def set_up_continuous_action_space(self):
"""
Set up continuous action space
"""
self.action_space = gym.spaces.Box(shape=(self.action_dim,), low=-1.0, high=1.0, dtype=np.float32)
self.action_high = self.velocity * np.ones([self.action_dim])
self.action_low = -self.action_high
def set_up_discrete_action_space(self):
"""
Set up discrete action space
"""
self.action_list = [
[self.velocity, self.velocity, 0, self.velocity],
[-self.velocity, -self.velocity, 0, -self.velocity],
[self.velocity, -self.velocity, -self.velocity, 0],
[-self.velocity, self.velocity, self.velocity, 0],
[0, 0, 0, 0],
]
self.action_space = gym.spaces.Discrete(len(self.action_list))
self.setup_keys_to_action()
def setup_keys_to_action(self):
self.keys_to_action = {
(ord("w"),): 0, # forward
(ord("s"),): 1, # backward
(ord("d"),): 2, # turn right
(ord("a"),): 3, # turn left
(): 4,
}
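# Usage sketch (added for illustration; the config values below are
# assumptions, not taken from an official iGibson config file):
# config = {"velocity": 1.0, "robot_scale": 1.0, "is_discrete": True}
# robot = JR2(config)
# With is_discrete=True the agent picks an index into robot.action_list;
# index 0 is the action bound to the "w" (forward) key above.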
| 31.196429 | 106 | 0.566113 |
f71a1fb42d65587e922d09e984061b07a1aaed3f | 122 | py | Python | askci/plugins/pam_auth/__init__.py | hpsee/askci | ef1e2e75481b71db7fbe774cb81938055aa596d0 | [
"MIT"
] | 3 | 2019-11-21T09:04:36.000Z | 2019-11-23T13:29:43.000Z | askci/plugins/pam_auth/__init__.py | hpsee/askci | ef1e2e75481b71db7fbe774cb81938055aa596d0 | [
"MIT"
] | 13 | 2019-11-21T20:28:23.000Z | 2019-11-26T19:34:22.000Z | askci/plugins/pam_auth/__init__.py | hpsee/askci | ef1e2e75481b71db7fbe774cb81938055aa596d0 | [
"MIT"
] | null | null | null | AUTHENTICATION_BACKENDS = (
"django_pam.auth.backends.PAMBackend",
"django.contrib.auth.backends.ModelBackend",
)
| 24.4 | 48 | 0.754098 |
f71a3354afd52b38a1b508cdd629a00d472d8746 | 2,651 | py | Python | tests/test_logger.py | agraubert/agutil | d9a568df01959ed985c9c8e77bdd501ac13bdbbf | [
"MIT"
] | 3 | 2017-06-05T15:46:22.000Z | 2019-05-22T21:26:54.000Z | tests/test_logger.py | agraubert/agutil | d9a568df01959ed985c9c8e77bdd501ac13bdbbf | [
"MIT"
] | 93 | 2016-06-22T18:57:47.000Z | 2022-02-14T10:50:27.000Z | tests/test_logger.py | agraubert/agutil | d9a568df01959ed985c9c8e77bdd501ac13bdbbf | [
"MIT"
] | null | null | null | import unittest
import unittest.mock
import os
from py_compile import compile
import sys
import random
import time
import tempfile
from filecmp import cmp
def make_random_string(length=25, lower=0, upper=255):
return "".join(chr(random.randint(lower,upper)) for i in range(length))
def tempname():
(handle, name) = tempfile.mkstemp()
os.close(handle)
return name
class test(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.script_path = os.path.join(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)
),
"agutil",
"src",
"logger.py"
)
cls.data_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'data',
'logger'
)
sys.path.append(os.path.dirname(os.path.dirname(cls.script_path)))
random.seed()
def test_compilation(self):
compiled_path = compile(self.script_path)
self.assertTrue(compiled_path)
@unittest.skipIf(sys.platform.startswith('win'), "Tempfile cannot be used in this way on Windows")
def test_basic_logging(self):
import agutil.src.logger
time_mock = unittest.mock.Mock(side_effect = lambda fmt, time=0:fmt)
agutil.src.logger.time.strftime = time_mock
output_file = tempname()
log = agutil.src.logger.Logger(output_file, loglevel=agutil.src.logger.Logger.LOGLEVEL_DETAIL)
log.log("Test message")
log.log("More messages!", sender="me")
log.log("OH NO! This one's an error!", "Foo", "ERROR")
foo_bound = log.bindToSender("Foo")
log.mute("Foo", "Bar")
foo_bound("Message 1")
foo_bound("Message 2")
log.log("This should appear in the log, but not the dump", "Bar", "WARN")
foo_bound("Message 3")
log.unmute("Foo")
log.log("I've been unmuted!", "Foo")
log.log("This should be a warning", "Anyone", "BLORG")
time.sleep(.2)
log.addChannel("BLORG", 15)
log.setChannelCollection("BLORG", True)
log.log("This should be seen", "Anyone", "BLORG")
log.setChannelCollection("WARN", False)
log.setChannelCollection("WARN", True)
time.sleep(.2)
log.log("This should appear in the dump", "Bar", "WARN")
time.sleep(.1)
self.assertFalse(log.close())
self.assertTrue(cmp(
output_file,
os.path.join(
self.data_path,
'logger_compare.txt'
)
))
os.remove(output_file)
| 32.728395 | 102 | 0.590343 |
f71a3706a5e1e09a9b5ac6542d63281e2cb4bab7 | 1,370 | py | Python | tests/test_platform_api.py | jain-aayush1123/here-location-services-python | 11ad5ef8273b4f243c43bc00ebd470f725b980bc | [
"Apache-2.0"
] | 16 | 2021-02-15T13:49:29.000Z | 2022-03-29T10:34:43.000Z | tests/test_platform_api.py | jain-aayush1123/here-location-services-python | 11ad5ef8273b4f243c43bc00ebd470f725b980bc | [
"Apache-2.0"
] | 8 | 2021-02-27T18:40:46.000Z | 2021-10-03T15:49:27.000Z | tests/test_platform_api.py | jain-aayush1123/here-location-services-python | 11ad5ef8273b4f243c43bc00ebd470f725b980bc | [
"Apache-2.0"
] | 11 | 2021-02-16T04:58:08.000Z | 2022-02-21T20:51:55.000Z | # Copyright (C) 2019-2021 HERE Europe B.V.
# SPDX-License-Identifier: Apache-2.0
"""This module will test platform api module."""
import pytest
from requests_oauthlib import OAuth1
from here_location_services.platform.apis.aaa_oauth2_api import AAAOauth2Api
from here_location_services.platform.apis.api import Api as PlaformApi
from here_location_services.utils import get_apikey
from tests.conftest import get_mock_response
LS_API_KEY = get_apikey()
def test_api_headers_property():
api = PlaformApi(access_token="dummy")
assert api.headers == {"Authorization": "Bearer dummy"}
def test_mock_request_post(mocker):
mocker.patch("requests.post", return_value=True)
api = PlaformApi(access_token="dummy")
resp = api.post("dummy_url", data={"foo": "bar"})
assert resp is True
def test_mock_request_scoped_access_token_exception(mocker):
reason = "This is mock reason"
text = "This is mock text"
mock_response = get_mock_response(500, reason, text)
mocker.patch("here_location_services.platform.apis.api.Api.post", return_value=mock_response)
aaa_api = AAAOauth2Api(base_url="dummy")
oauth = OAuth1(
"dummy_key",
client_secret="dummy_secret",
signature_method="HMAC-SHA256",
)
with pytest.raises(Exception):
aaa_api.request_scoped_access_token(oauth=oauth, data="dummy_data")
| 34.25 | 97 | 0.750365 |
f71a703f2090876a8e79cf5a51d2bb5e3344842c | 153,793 | py | Python | spyke/sort.py | spyke/spyke | 20934521de9c557924911cf6190690ac1c6f8e80 | [
"CNRI-Python"
] | 22 | 2015-06-01T03:31:00.000Z | 2022-03-18T09:12:28.000Z | spyke/sort.py | spyke/spyke | 20934521de9c557924911cf6190690ac1c6f8e80 | [
"CNRI-Python"
] | 3 | 2017-03-24T19:16:02.000Z | 2021-01-27T14:34:30.000Z | spyke/sort.py | spyke/spyke | 20934521de9c557924911cf6190690ac1c6f8e80 | [
"CNRI-Python"
] | 6 | 2015-07-10T15:28:08.000Z | 2022-03-17T19:30:45.000Z | """Spike sorting classes and window"""
from __future__ import division
from __future__ import print_function
__authors__ = ['Martin Spacek', 'Reza Lotun']
import os
import sys
import time
import datetime
from copy import copy
import operator
import random
import shutil
import hashlib
import multiprocessing as mp
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QAction, QIcon, QApplication
import numpy as np
import scipy
import scipy.signal
#from scipy.cluster.hierarchy import fclusterdata
import pylab as pl
import pyximport
pyximport.install(build_in_temp=False, inplace=True)
from . import util # .pyx file
from . import core
from .core import (WaveForm, Gaussian, MAXLONGLONG, R, toiter, intround, printflush, lstrip,
rstrip, lrstrip, pad, td2days, SpykeToolWindow, NList, NSList, dist,
USList, ClusterChange, SpikeSelectionSlider, lrrep2Darrstripis, rollwin2D)
from .detect import DEBUG
from .surf import EPOCH
from .plot import SpikeSortPanel, CLUSTERCOLOURDICT, WHITE
from .__version__ import __version__
#MAXCHANTOLERANCE = 100 # um
NSLISTWIDTH = 70 # minimize nslist width, enough for 7 digit spike IDs
PANELWIDTHPERCOLUMN = 120 # sort panel width per column of channels
PANELHEIGHTPERROW = 50 # sort panel height per row of channels
VSCROLLBARWIDTH = 14 # hack
SORTWINDOWHEIGHT = 1035 # TODO: this should be set programmatically
MINSORTWINDOWWIDTH = 566
MEANWAVEMAXSAMPLES = 2000
NPCSPERCHAN = 7
PCALIB = 'mdp'
ICALIB = 'sklearn'
DEFMINISI = 50 # default minimum ISI to check for on export, us
MAXGROUPISI = 100000 # us (100 ms)
MAXGROUPDT = 100000000 # us (100 s)
class Sort(object):
"""A spike sorting session, in which you can detect spikes and sort them into Neurons.
A .sort file is a single Python2-pickled Sort object. A .json file is a
jsonpickle-pickled Sort object"""
def __init__(self, detector=None, stream=None, tw=None):
self.__version__ = __version__
self.fname = ''
self.user = ''
self.notes = ''
self.detector = detector # this Sort's current Detector object
self.tw = tw # time window (us) relative to spike time
self.stream = stream
self.probe = stream.probe # only one probe design per sort allowed
self.converter = stream.converter
self.neurons = {}
self.clusters = {} # neurons with multidm params scaled for plotting
self.norder = [] # stores order of neuron ids display in nlist
self.npcsperchan = NPCSPERCHAN
def get_nextnid(self):
"""nextnid is used to retrieve the next unique single unit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return 1 # single unit nids start at 1
else:
return max(max(nids) + 1, 1) # at least 1
nextnid = property(get_nextnid)
def get_nextmuid(self):
"""nextmuid is used to retrieve the next unique multiunit ID"""
nids = list(self.neurons)
if len(nids) == 0:
return -1 # multiunit ids start at -1
else:
return min(min(nids) - 1, -1) # at most -1
nextmuid = property(get_nextmuid)
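# Illustration (added): with existing neuron IDs {-2, -1, 1, 2, 3},
# sort.nextnid evaluates to 4 and sort.nextmuid to -3; single-unit IDs count
# up from 1 while multiunit IDs count down from -1.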
def get_good(self):
"""Return array of nids marked by user as 'good'"""
good = []
for neuron in self.neurons.values():
try:
if neuron.good:
good.append(neuron.id)
except AttributeError: # neuron is from older sort, no .good attrib
neuron.good = False
return np.asarray(good)
def set_good(self, good):
"""Set good flag to True for nids in good, False otherwise"""
nids = list(self.neurons)
assert np.all([ nid in nids for nid in good ]) # make sure all nids in good exist
notgood = np.setdiff1d(nids, good)
for nid in notgood:
neuron = self.neurons[nid]
neuron.good = False
for nid in good:
neuron = self.neurons[nid]
neuron.good = True
good = property(get_good, set_good)
def get_stream(self):
try:
return self._stream
except AttributeError:
# this is likely a brand new sort, has yet to be assigned a Stream
return None
def set_stream(self, stream=None):
"""Check stream type and name and probe type, and restore filtmeth, car, sampfreq and
shcorrect to stream when binding/modifying stream to self"""
oldstream = self.stream
if stream != None and oldstream != None:
# do stream types match?
if type(stream) != type(oldstream):
raise ValueError("Stream types don't match: %s, %s"
% (type(oldstream), type(stream)))
# do stream probe types match?
if type(stream.probe) != type(oldstream.probe):
raise ValueError("Stream probe types don't match: %s, %s"
% (type(oldstream.probe), type(stream.probe)))
# is one stream fname a superset of the other?
if (stream.fname not in oldstream.fname) and (oldstream.fname not in stream.fname):
raise ValueError("Stream file names are not supersets of each other: %s, %s"
% (oldstream.fname, stream.fname))
else:
print('Stream file names are similar enough to proceed: %s, %s'
% (stream.fname, oldstream.fname))
try:
stream.filtmeth = self.filtmeth
stream.car = self.car
stream.sampfreq = self.sampfreq
stream.shcorrect = self.shcorrect
except AttributeError:
pass # one of the above aren't bound
self._stream = stream # set it
print('Bound stream %r to sort %r' % (stream.fname, self.fname))
# now that tres is known, calculate window timepoints wrt spike time:
self.calc_twts_twi()
stream = property(get_stream, set_stream)
def calc_twts_twi(self):
"""Calculate temporal window timepoints wrt spike time, and the indices of these
timepoints wrt spike time"""
tres = self.tres
tw = self.tw
twts = np.arange(tw[0], tw[1], tres)
twts += twts[0] % tres # get rid of mod, so twts go through zero
self.twts = twts
self.twi = intround(twts[0] / tres), intround(twts[-1] / tres)
#info('twi = %s' % (self.twi,))
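# Worked example (added; tw and tres values are made up): with
# tw = (-500, 500) us and tres = 20 us, twts = -500, -480, ..., 480 and
# twi = (-25, 24), i.e. 25 samples before and 24 samples after t=0.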
def update_tw(self, tw):
"""Update tw and everything that depends on it. Note that this shouldn't
be called directly by the user. Call SpykeWindow.update_spiketw() instead"""
oldtw = self.tw
self.tw = tw
self.calc_twts_twi()
dtw = np.asarray(tw) - np.asarray(oldtw) # new minus old
self.spikes['t0'] += dtw[0]
self.spikes['t1'] += dtw[1]
self.spikes['tis'] = self.spikes['tis'] - intround(dtw[0] / self.tres)
# recalculate any existing templates:
for neuron in self.neurons.values():
if neuron.wave.data != None:
neuron.update_wave()
print('WARNING: all spike waveforms need to be reloaded!')
def get_tres(self):
return self.stream.tres
tres = property(get_tres)
def __getstate__(self):
"""Get object state for pickling"""
# copy it cuz we'll be making changes, this is fast because it's just a shallow copy
d = self.__dict__.copy()
# Spikes and wavedata arrays are (potentially) saved separately.
# usids and PCs/ICs can be regenerated from the spikes array.
for attr in ['spikes', 'wavedata', 'usids', 'X', 'Xhash']:
# keep _stream during normal pickling for multiprocessing, but remove it
# manually when pickling to sort file
try: del d[attr]
except KeyError: pass
return d
def get_nspikes(self):
try: return len(self.spikes)
except AttributeError: return 0
nspikes = property(get_nspikes)
def update_usids(self):
"""Update usids, which is an array of indices of unsorted spikes"""
nids = self.spikes['nid']
self.usids, = np.where(nids == 0) # 0 means unclustered
def get_spikes_sortedby(self, attr='id'):
"""Return array of all spikes, sorted by attribute 'attr'"""
vals = self.spikes[attr]
spikes = self.spikes[vals.argsort()]
return spikes
def get_wave(self, sid):
"""Return WaveForm corresponding to spike sid"""
spikes = self.spikes
nchans = spikes['nchans'][sid]
chans = spikes['chans'][sid, :nchans]
t0 = spikes['t0'][sid]
t1 = spikes['t1'][sid]
wavedata = self.wavedata[sid, 0:nchans]
ts = np.arange(t0, t1, self.tres) # build them up
return WaveForm(data=wavedata, ts=ts, chans=chans, tres=self.tres)
def get_maxchan_wavedata(self, sid=None, nid=None):
"""Return wavedata of maxchan of spike sid or neuron nid"""
if sid != None:
assert nid == None
chani = self.spikes['chani'][sid]
return self.wavedata[sid, chani]
elif nid != None:
assert sid == None
neuron = self.neurons[nid]
chani, = np.where(neuron.chans == neuron.chan)
assert len(chani) == 1
chani = chani[0] # pull out of length 1 array
return neuron.wave.data[chani]
def get_mean_wave(self, sids, nid=None):
"""Return the mean and std waveform of spike waveforms in sids"""
spikes = self.spikes
nsids = len(sids)
if nsids > MEANWAVEMAXSAMPLES:
step = nsids // MEANWAVEMAXSAMPLES + 1
s = ("get_mean_wave() sampling every %d spikes instead of all %d"
% (step, nsids))
if nid != None:
s = "neuron %d: " % nid + s
print(s)
sids = sids[::step]
nsids = len(sids) # update
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
chanslist = [ chans[:nchans] for chans, nchans in zip(chanss, nchanss) ] # list of arrays
chanpopulation = np.concatenate(chanslist)
groupchans = np.unique(chanpopulation) # comes out sorted
wavedata = self.wavedata[sids]
if wavedata.ndim == 2: # should be 3, get only 2 if nsids == 1
wavedata.shape = 1, wavedata.shape[0], wavedata.shape[1] # give it a singleton 3rd dim
nt = wavedata.shape[-1]
maxnchans = len(groupchans)
data = np.zeros((maxnchans, nt))
# all spikes have same nt, but not necessarily same nchans, keep track of
# how many spikes contributed to each of the group's chans
nspikes = np.zeros((maxnchans, 1), dtype=int)
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
data[chanis] += wd[:len(chans)] # accumulate
nspikes[chanis] += 1 # inc spike count for this spike's chans
#t0 = time.time()
data /= nspikes # normalize all data points appropriately, this is now the mean
var = np.zeros((maxnchans, nt))
for chans, wd in zip(chanslist, wavedata):
chanis = groupchans.searchsorted(chans) # each spike's chans is a subset of groupchans
var[chanis] += (wd[:len(chans)] - data[chanis]) ** 2 # accumulate 2nd moment
var /= nspikes # normalize all data points appropriately, this is now the variance
std = np.sqrt(var)
# keep only those chans that at least 1/2 the spikes contributed to
bins = list(groupchans) + [np.inf] # concatenate rightmost bin edge
hist, bins = np.histogram(chanpopulation, bins=bins)
chans = groupchans[hist >= nsids/2]
chanis = groupchans.searchsorted(chans)
data = data[chanis]
std = std[chanis]
return WaveForm(data=data, std=std, chans=chans)
def check_ISIs(self, nids='good'):
"""Check that interspike intervals of spikes in each nid never fall below DEFMINISI"""
print('Checking inter-spike intervals')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
ndupl = (np.diff(spikets) < DEFMINISI).sum()
if ndupl > 0:
msg = ('n%d has %d duplicate spikes (given DEFMINISI=%d us).\n'
'Remove duplicate spikes with the ISI tool in the Verify tab'
% (nid, ndupl, DEFMINISI))
raise RuntimeError(msg)
def check_wavealign(self, nids='good', maxdti=1):
"""Check that each neurons's primary peak on the max chan is no more than +/- maxdti
timepoints away from the t=0 alignment timepoint"""
print('Checking neuron mean waveform alignment')
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
nt = self.twi[1] - self.twi[0] + 1 # expected number of points of each chan's wavedata
for nid in nids:
neuron = self.neurons[nid]
wd = self.get_maxchan_wavedata(nid=nid)
assert len(wd) == nt
# find biggest positive and negative peaks, check which comes first, ensure
# the primary peak is within maxdti of t=0 alignment timepoint:
ppeakis, _ = scipy.signal.find_peaks(wd) # positive peak indices
npeakis, _ = scipy.signal.find_peaks(-wd) # negative peak indices
pmaxi = ppeakis[wd[ppeakis].argmax()] # max positive peak index
nmaxi = npeakis[wd[npeakis].argmin()] # max negative peak index
if nmaxi < pmaxi: # usual case: -ve then +ve peak
peak1i = nmaxi
else: # less common: +ve then -ve peak, make sure +ve peak is worthy of alignment
pmax, nmax = wd[pmaxi], wd[nmaxi]
if pmax > abs(nmax): # +ve peak is bigger than -ve peak, align to +ve peak
peak1i = pmaxi
else:
peak1i = nmaxi # default to -ve peak
alignti = 0 - self.twi[0] # +ve
dti = peak1i - alignti
#print("n%d: dti=%d" % (nid, dti))
if abs(dti) > maxdti:
peak1uV = self.converter.AD2uV(wd[peak1i])
peak1us = intround(self.tres*(peak1i-alignti))
msg = ('Primary peak (%+d uV @ t=%d us) of n%d is %+d timepoints away from '
'the t=0 us alignment point. Shift it closer and try again'
% (peak1uV, peak1us, nid, dti))
raise RuntimeError(msg)
def check_wavepadding(self, nids='good', npad=2):
"""Check if any spikes are edge padded, presumably due to being shifted but not
reloaded. For robustness, check for consistent signs of padding across all channels.
An edge is considered padded if it does not change over npad datapoints"""
print('Checking spike waveform padding')
assert npad >= 2 # need at least 2 points to do a diff
if nids == 'good':
nids = self.good
elif nids == 'all':
nids = sorted(self.neurons)
for nid in nids:
neuron = self.neurons[nid]
for sid in neuron.sids:
wd = self.wavedata[sid] # multichannel waveform data
# are left and right edges of wavedata identical for npad number of points?
l, r = wd[:, :npad], wd[:, -npad:] # shape (nchans, npad)
leftpadded = (np.diff(l, axis=1) == 0).all()
rightpadded = (np.diff(r, axis=1) == 0).all()
# handle case where spike is right after or right before a 0-padded
# region of data due to gaps between experiments:
if leftpadded:
if (wd[:, 0] == 0).all():
leftpadded = False
if rightpadded:
if (wd[:, -1] == 0).all():
rightpadded = False
if leftpadded or rightpadded:
msg = ('n%d has s%d that looks like it has been padded.\n'
'leftpadded, rightpadded = %r, %r\n'
'Reload s%d or n%d or all spikes and try again'
% (nid, sid, leftpadded, rightpadded, sid, nid))
raise RuntimeError(msg)
def check_contiguous_nids(self):
"""Check that neuron IDs are contiguous (no gaps)"""
print('Checking that neuron IDs are contiguous')
nids = np.array(list(self.neurons))
nids = nids[nids > 0] # only consider +ve nids
nids.sort()
if (np.diff(nids) != 1).any():
raise RuntimeError('Neuron IDs are not contiguous, renumber all and try again')
def exportptcsfiles(self, basepath, sortpath, user='', notes=''):
"""Export spike data to binary .ptcs files under basepath, one file per recording"""
# First check to make sure various things are OK before exporting:
self.check_ISIs()
self.check_wavealign()
self.check_wavepadding()
self.check_contiguous_nids()
spikes = self.spikes
exportdt = str(datetime.datetime.now()) # get an export datetime stamp
exportdt = exportdt.split('.')[0] # ditch the us
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting "good" clusters to:')
# do a separate export for each recording:
# absolute start and stop times of all streams, rounded to nearest raw timepoint:
tranges = self.stream.tranges
t0 = tranges[0, 0] # absolute start time of first stream
for stream, trange in zip(streams, tranges):
abst0 = trange[0] # absolute start time of this stream relative to t0
# time delta between this stream and first stream, to nearest raw timepoint, us:
dt = abst0 - t0
dt = intround(dt) # to nearest int us
self.exportptcsfile(stream, basepath, dt, exportdt, sortpath,
user=user, notes=notes)
def exportptcsfile(self, stream, basepath, dt, exportdt, sortpath, user='', notes=''):
"""Export spike data of all "good" spikes to binary .ptcs file in basepath.
Constrain to spikes in stream, and undo any time delta in spike times.
dt is the integer time difference between start of stream and start of first stream in
the track, rounded to the nearest us (spike times are stored as int64 us in .ptcs)"""
# build up list of PTCSNeuronRecords that have spikes in this stream,
# and tally their spikes
nsamplebytes = 4 # float32
nrecs = []
nspikes = 0
# only export neurons marked as "good", could be single or multi unit:
for nid in sorted(self.good):
neuron = self.neurons[nid]
spikets = self.spikes['t'][neuron.sids] # should be a sorted copy
assert spikets.flags['OWNDATA'] # safe to modify in place
spikets.sort() # just in case it isn't perfectly sorted
spikets -= dt # export spike times relative to t=0 of this recording
# only include spikes that occurred during this recording
lo, hi = spikets.searchsorted([stream.t0, stream.t1])
spikets = spikets[lo:hi]
if len(spikets) == 0:
continue # don't save empty neurons
nrec = PTCSNeuronRecord(neuron, spikets, nsamplebytes, descr='')
nrecs.append(nrec)
nspikes += len(spikets)
nneurons = len(nrecs)
# create the header and write everything to file:
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
fname = stream.srcfnameroot + '.ptcs'
fullfname = os.path.join(path, fname)
header = PTCSHeader(self, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user=user, notes=notes)
with open(fullfname, 'wb') as f:
header.write(f)
for nrec in nrecs:
nrec.write(f)
print(fullfname)
def exportcsv(self, fname):
"""Export all "good" spikes to a .csv file with time (s), nid, and maxchan as the
columns"""
sids = []
#chans = []
for nid in sorted(self.good):
neuron = self.neurons[nid]
sids.append(neuron.sids)
# the alternative is to export each spike's unit's channel:
#chans.append(np.tile(neuron.chan, neuron.nspikes))
sids = np.hstack(sids)
spikes = self.spikes[sids]
tsecs = spikes['t'] / 1e6 # convert from us to s
nids = spikes['nid']
chans = spikes['chan']
#chans = np.hstack(chans)
data = np.column_stack([tsecs, nids, chans])
print('Exporting (tsec, nid, chan) of all spikes marked as "good" to %s' % fname)
np.savetxt(fname, data, fmt='%.6f, %d, %d')
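# Example rows of the resulting .csv (added; values are made up):
# 12.345678, 7, 23
# 12.350112, 3, 41
# i.e. spike time in seconds, neuron ID, and the spike's max channel.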
def exporttschid(self, basepath):
"""Export int64 (timestamp, channel, neuron id) 3 tuples to binary file"""
raise NotImplementedError('Needs to be redone to work with multiple streams')
spikes = self.spikes[self.spikes['nid'] > 0] # don't export unsorted/multiunit spikes
dt = str(datetime.datetime.now()) # get an export timestamp
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
srffnameroot = srffnameroot.replace(' ', '_')
tschidfname = dt + '_' + srffnameroot + '.tschid'
tschid = np.empty((len(spikes), 3), dtype=np.int64)
tschid[:, 0] = spikes['t']
tschid[:, 1] = spikes['chan']
tschid[:, 2] = spikes['nid']
tschid.tofile(os.path.join(path, tschidfname)) # save it
print(tschidfname)
def exportdin(self, basepath):
"""Export stimulus din(s) to binary .din file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
dinfiledtype=[('TimeStamp', '<i8'), ('SVal', '<i8')] # pairs of int64s
print('Exporting DIN(s) to:')
for stream in streams:
try: # neither of these attribs should exist for recordings with no stimuli:
svrecs = stream.srff.digitalsvalrecords
dsprecs = stream.srff.displayrecords
except AttributeError:
continue # no din to export for this stream
if len(svrecs) == 0 or stream.srff.ndigitalsvalrecords == 0:
raise ValueError("digitalsvalrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# upcast SVal field from uint16 to int64, creates a copy,
# but it's not too expensive:
svrecs = svrecs.astype(dinfiledtype)
# convert to normal n x 2 int64 array
svrecs = svrecs.view(np.int64).reshape(-1, 2)
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one .din
# per displayrecord. Append experiment ID to each .din filename, if necessary.
svrects = svrecs[:, 0]
dsprects = [ dsprec.TimeStamp for dsprec in dsprecs ]
svalrecis = svrects.searchsorted(dsprects)
assert svalrecis[0] == 0
svalrecis = svalrecis[1:] # exclude the trivial 0 index
# split sval records according to displayrecord timestamps:
dins = np.split(svrecs, svalrecis)
assert len(dins) == len(dsprecs)
for eid, din in enumerate(dins):
if eid == 0 and len(dins) == 1:
eidstr = ''
elif len(dins) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
dinfname = stream.srcfnameroot + eidstr + '.din'
fullfname = os.path.join(path, dinfname)
din.tofile(fullfname) # save it
print(fullfname)
def exporttextheader(self, basepath):
"""Export stimulus text header(s) to .textheader file(s) in basepath"""
if self.stream.is_multi(): # self.stream is a MultiStream
streams = self.stream.streams
else: # self.stream is a single Stream
streams = [self.stream]
print('Exporting text header(s) to:')
for stream in streams:
try:
dsprecs = stream.srff.displayrecords
except AttributeError: # no textheader to export for this stream
continue
if len(dsprecs) == 0:
raise ValueError("displayrecords are empty for stream %r. Attribute "
"shouldn't exist" % stream.fname)
path = os.path.join(basepath, stream.srcfnameroot)
try: os.mkdir(path)
except OSError: pass # path already exists?
# Some old recordings (<= ptc15) contain multiple experiments.
# To deal with this, iterate over stream.srff.displayrecords, export one
# .textheader per displayrecord. Append experiment ID to each .textheader
# filename, if necessary.
for eid, dsprec in enumerate(dsprecs):
textheader = dsprec.Header.python_tbl
if eid == 0 and len(dsprecs) == 1:
eidstr = ''
elif len(dsprecs) < 10:
eidstr = '.%d' % eid
else: # include leading zero to maintain alphabetical fname order
eidstr = '.%02d' % eid
textheaderfname = stream.srcfnameroot + eidstr + '.textheader'
fullfname = os.path.join(path, textheaderfname)
with open(fullfname, 'w') as f:
f.write(textheader) # save it
print(fullfname)
def exportall(self, basepath, sortpath):
"""Export spike data, stimulus din and textheader to basepath"""
self.exportptcsfiles(basepath, sortpath)
self.exportdin(basepath)
self.exporttextheader(basepath)
def exportspikewaves(self, sids, selchans, tis, fname, format):
"""Export spike waveform data of selected sids, selchans and tis to binary
.spikes.zip file or text .spikes.csv file"""
nspikes = len(sids)
chans, chanslist = self.get_common_chans(sids, selchans)
nchans = len(chans)
ti0, ti1 = tis
nt = ti1 - ti0
# fill in 3D data array:
dtype = self.wavedata.dtype
data = np.zeros((nspikes, nchans, nt), dtype=dtype)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
if format == 'text': # flatten timepoints of all chans into columns
data.shape = nspikes, nchans*nt
stream = self.stream
assert stream.kind == 'highpass' # should be the only type ever saved to self
if format == 'binary':
nids = self.spikes['nid'][sids]
spiketimes = self.spikes['t'][sids]
chanpos = stream.probe.siteloc_arr()
uVperAD = stream.converter.AD2uV(1) # convert 1 AD unit to uV
with open(fname, 'wb') as f:
np.savez_compressed(f, data=data, sids=sids, nids=nids,
spiketimes=spiketimes, chans=chans, tis=tis,
chanpos=chanpos, uVperAD=uVperAD)
elif format == 'text':
np.savetxt(fname, data, fmt='%d', delimiter=',') # data should be int
else:
raise ValueError('Unknown format: %r' % format)
print('Exported %d spikes on chans=%r and tis=%r to %s'
% (nspikes, list(chans), list(tis), fname))
def get_param_matrix(self, kind=None, sids=None, tis=None, selchans=None, norm=False,
dims=None, scale=True):
"""Organize dims parameters from sids into a data matrix, each column
corresponding to a dim. To do PCA/ICA clustering on all spikes, one maxchan at
a time, the caller needs to call this multiple times, once for each set of
spikes sharing the same maxchan"""
spikes = self.spikes
dtypefields = list(spikes.dtype.fields)
if sids is None:
sids = spikes['id'] # default to all spikes
comps = [ dim for dim in dims if dim.startswith('c') and dim[-1].isdigit() ]
rmserror = np.any([ dim == 'RMSerror' for dim in dims ])
ncomp = len(comps)
hascomps = ncomp > 0
if hascomps:
X = self.get_component_matrix(kind, sids, tis=tis, chans=selchans,
minncomp=ncomp, norm=norm)
if rmserror:
rms = self.get_rms_error(sids, tis=tis, chans=selchans)
data = []
for dim in dims:
if dim in dtypefields:
data.append( np.float32(spikes[dim][sids]) )
elif dim.startswith('c') and dim[-1].isdigit():
compid = int(lstrip(dim, 'c'))
data.append( np.float32(X[:, compid]) )
elif dim == 'RMSerror':
data.append( np.float32(rms) )
else:
raise RuntimeError('Unknown dim %r' % dim)
# np.column_stack returns a copy, not modifying the original array
data = np.column_stack(data)
if scale:
# ensure 0 mean, and unit variance/stdev
for dim, d in zip(dims, data.T): # d iterates over columns
d -= d.mean()
if dim in ['x0', 'y0'] and self.probe.ncols > 1:
try: x0std # normalize spatial params by x0 std
except NameError: x0std = spikes['x0'].std()
if x0std != 0.0:
d /= x0std
#elif dim == 't': # the longer the recording in hours, the greater the
# # scaling in time
# trange = d.max() - d.min()
# tscale = trange / (60*60*1e6)
# d *= tscale / d.std()
else: # normalize all other dims by their std
dstd = d.std()
if dstd != 0.0:
d /= dstd
return data
def get_component_matrix(self, kind, sids, tis=None, chans=None, minncomp=None,
norm=False):
"""Find set of chans common to all sids, and do PCA/ICA on those waveforms. Or,
if chans are specified, limit PCA/ICA to them. Return component matrix with at
least minncomp dimensions"""
spikes = self.spikes
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nspikes < 2:
raise RuntimeError("Need at least 2 spikes for %s" % kind)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for %s" % kind)
# check if desired components have already been calculated (cache hit):
Xhash = self.get_Xhash(kind, sids, tis, chans, self.npcsperchan, norm)
self.Xhash = Xhash # save as key to most recent component matrix in self.X
try: self.X
except AttributeError: self.X = {} # init the dimension reduction cache attrib
if Xhash in self.X:
print('Cache hit, using cached %ss from tis=%r, chans=%r of %d spikes' %
(kind[:-1], list(tis), list(chans), nspikes))
return self.X[Xhash] # no need to recalculate
print('Cache miss, (re)calculating %ss' % kind[:-1])
# collect data between tis from chans from all spikes:
print('Doing %s on tis=%r, chans=%r of %d spikes' %
(kind, list(tis), list(chans), nspikes))
# MDP complains of roundoff errors with float32 for large covariance matrices
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
spikedata = self.wavedata[sid][spikechanis, ti0:ti1]
if norm:
# normalize by Vpp of chan with max Vpp:
maxptp = spikedata.ptp(axis=1).max()
if maxptp != 0: # prevent div by 0
spikedata = spikedata / maxptp
data[sii] = spikedata
print('Input shape for %s: %r' % (kind, data.shape))
t0 = time.time()
data.shape = nspikes, nchans*nt # flatten timepoints of all chans into columns
print('Reshaped input for %s: %r' % (kind, data.shape))
if kind == 'PCA': # principal components analysis
if PCALIB == 'mdp':
import mdp # delay as late as possible
X = mdp.pca(data, output_dim=5, svd=False) # svd=False is default
elif PCALIB == 'sklearn':
# sklearn's PCA is about 8x slower than mdp.pca, I think because it
# doesn't tap into scipy.linalg.eig compiled code. RandomizedPCA is faster
# than PCA, but isn't deterministic, and is still 2-3x slower than mdp.pca
from sklearn.decomposition import PCA
pca = PCA(n_components=5)
X = pca.fit_transform(data) # do both the fit and the transform
else:
raise ValueError('Invalid PCALIB %r' % PCALIB)
if X.shape[1] < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
elif kind == 'sPCA': # sparse principal components analysis
from sklearn.decomposition import SparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
spca = SparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = spca.fit_transform(data) # do both the fit and the transform
elif kind == 'mbsPCA': # mini batch sparse principal components analysis
from sklearn.decomposition import MiniBatchSparsePCA
n_components = 5
alpha = 1 # sparseness parameter
n_jobs = mp.cpu_count()
mbspca = MiniBatchSparsePCA(n_components=n_components, alpha=alpha, n_jobs=n_jobs)
X = mbspca.fit_transform(data) # do both the fit and the transform
elif kind == 'NMF': # non-negative matrix factorization
from sklearn.decomposition import NMF
n_components = 5
init = None # 'random', 'nndsvd', 'nndsvda', 'nndsvdar', 'custom'
nmf = NMF(n_components=n_components, init=init)
X = nmf.fit_transform(data) # do both the fit and the transform
elif kind == 'tSNE': # t-distributed stochastic neighbor embedding
# limit number of PCs to feed into ICA, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, data.shape[1]))
print('ncomp: %d' % ncomp)
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up ICA:
data = mdp.pca(data, output_dim=ncomp)
from sklearn.manifold import TSNE
n_components = 3 # not suited for any more than 3, according to the paper
#init = 'random', 'pca'
tsne = TSNE(n_components=n_components)
X = tsne.fit_transform(data) # do both the fit and the transform
elif kind == 'ICA': # independent components analysis
# ensure nspikes >= ndims**2 for good ICA convergence
maxncomp = intround(np.sqrt(nspikes))
if maxncomp < minncomp:
raise RuntimeError("Can't satisfy minncomp=%d request" % minncomp)
if data.shape[0] <= data.shape[1]:
raise RuntimeError('Need more observations than dimensions for ICA')
# limit number of PCs to feed into ICA, keep up to npcsperchan components per
# chan on average:
ncomp = min((self.npcsperchan*nchans, maxncomp, data.shape[1]))
if ICALIB == 'mdp':
import mdp # delay as late as possible
# do PCA first, to reduce dimensionality and speed up ICA:
print('ncomp: %d' % ncomp)
data = mdp.pca(data, output_dim=ncomp)
# nonlinearity g='pow3', ie x**3. tanh seems to separate better,
# but is a bit slower. gaus seems to be slower still, and no better
# than tanh, but these are just vague impressions.
# defaults to whitened=False, ie assumes data isn't whitened
node = mdp.nodes.FastICANode(g='pow3')
X = node(data)
pm = node.get_projmatrix()
X = X[:, np.any(pm, axis=0)] # keep only the non zero columns
elif ICALIB == 'sklearn':
from sklearn.decomposition import FastICA
# when whiten=True (default), FastICA preprocesses the data using PCA, and
# n_components is the number of PCs that are kept before doing ICA.
alg = 'parallel' # parallel or deflation, default is parallel
fun = 'logcosh' # logcosh, exp, or cube, default is logcosh
maxiter = 100 # default is 200
tol = 0.5 # default is 0.0001, seems need >~ 0.1 to exit faster
## TODO: make FastICA algorithm (parallel, deflation), nonlinearity (logcosh,
## exp, cube) and IC sort method (abs(kurtosis) vs. negentropy) GUI options
print('ncomp=%d, alg=%r, fun=%r, maxiter=%d, tol=%g'
% (ncomp, alg, fun, maxiter, tol))
fastica = FastICA(n_components=ncomp, algorithm=alg,
whiten=True, fun=fun, fun_args=None,
max_iter=maxiter, tol=tol, w_init=None,
random_state=None)
X = fastica.fit_transform(data) # do both the fit and the transform
#pm = fastica.components_
print('fastica niters: %d' % (fastica.n_iter_))
else:
raise ValueError('Invalid ICALIB %r' % ICALIB)
if X.shape[1] < 3:
raise RuntimeError('Need at least 3 columns')
# Sort ICs by decreasing kurtosis or negentropy. For kurtosis, see Scholz2004 (or
# rather, opposite to their approach, which picked ICs with most negative
# kurtosis). For methods of estimating negentropy, see Hyvarinen1997.
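            # Negentropy measures departure from Gaussianity. A common approximation
            # (Hyvarinen1997) is J(y) ~ [E{G(y)} - E{G(v)}]**2 for a nonquadratic G and
            # standard normal v; whatever estimator core.negentropy implements, the
            # ranking below only assumes that larger values mean less Gaussian.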
'''
# sort by abs(kurtosis) of each IC (column)
k = scipy.stats.kurtosis(X, axis=0)
ki = abs(k).argsort()[::-1] # decreasing order of abs(kurtosis)
print('Sort by abs(kurtosis):')
print(k[ki])
X = X[:, ki] # sort the ICs
'''
# sort by negentropy of each IC (column), this seems to work better than kurtosis
# at separating clusters of similar size:
ne = core.negentropy(X, axis=0)
assert (ne > 0).all()
nei = ne.argsort()[::-1] # decreasing order of negentropy
print('Sort by negentropy:')
print(ne[nei])
X = X[:, nei] # sort the ICs
'''
import pylab as pl
pl.figure()
pl.imshow(pm)
pl.colorbar()
pl.title('original projmatrix')
pl.figure()
pl.imshow(pm[:, ki])
pl.colorbar()
pl.title('decreasing abs(kurtosis) projmatrix')
pl.figure()
pl.imshow(pm[:, nei])
pl.colorbar()
pl.title('decreasing negentropy projmatrix')
'''
else:
raise ValueError('Unknown kind %r' % kind)
print('Output shape for %s: %r' % (kind, X.shape))
self.X[Xhash] = X # cache for fast future retrieval
print('%s took %.3f sec' % (kind, time.time()-t0))
unids = list(np.unique(spikes['nid'][sids])) # set of all nids that sids span
for nid in unids:
# don't update pos of junk cluster, if any, since it might not have any chans
# common to all its spikes, and therefore can't have PCA/ICA done on it
if nid != 0:
self.clusters[nid].update_comppos(X, sids)
return X
def get_rms_error(self, sids, tis=None, chans=None):
"""Calculate RMS error of spike waveforms (all from the same cluster) relative to
their cluster's mean waveform. Consider only selected tis and chans"""
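        # The returned error for each spike is sqrt(mean over the selected chans and
        # timepoints of the squared difference between that spike's waveform and the
        # cluster's mean waveform).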
spikes = self.spikes
nids = np.unique(spikes['nid'][sids])
nid = nids[0]
if len(nids) > 1 or nid == 0:
raise RuntimeError("Spikes must all belong to the same (non-junk) cluster for "
"RMS error calculation")
nt = self.wavedata.shape[2]
if tis is None: # use full waveform
tis = np.asarray([0, nt])
#print('tis: %r' % (tis,))
ti0, ti1 = tis
assert ti0 < ti1 <= nt
nt = ti1 - ti0
chans, chanslist = self.get_common_chans(sids, chans)
nchans = len(chans)
nspikes = len(sids)
if nchans == 0:
raise RuntimeError("Spikes have no common chans for RMS error")
# collect data between tis from chans from all spikes:
print('Getting RMS error on tis=%r, chans=%r of %d spikes' %
(list(tis), list(chans), nspikes))
data = np.zeros((nspikes, nchans, nt), dtype=np.float64)
for sii, sid in enumerate(sids):
spikechans = chanslist[sii]
spikechanis = spikechans.searchsorted(chans)
data[sii] = self.wavedata[sid][spikechanis, ti0:ti1]
# get cluster mean waveform between tis on chans:
wave = self.neurons[nid].get_wave()
chanis = wave.chans.searchsorted(chans)
meandata = np.float64(wave.data[chanis, ti0:ti1])
# calculate RMS error between each spike and the cluster mean waveform:
se = (data - meandata) ** 2 # squared error
# take mean across timepoints and chans, but not across spikes:
mse = se.mean(axis=2).mean(axis=1) # mean squared error
return np.sqrt(mse)
def get_common_chans(self, sids, chans=None):
"""Find channels common to all sids, and optionally to chans as well. Also,
return chanslist, ie list of arrays of chans of sids"""
spikes = self.spikes
chanss = spikes['chans'][sids]
nchanss = spikes['nchans'][sids]
#t0 = time.time()
chanslist = [ cs[:ncs] for cs, ncs in zip(chanss, nchanss) ] # list of arrays
#print('Building chanslist took %.3f sec' % (time.time()-t0))
commonchans = util.intersect1d_uint8(chanslist) # find intersection
if chans is not None and len(chans) > 0:
# values in chans but not in commonchans:
diffchans = np.setdiff1d(chans, commonchans)
commonchans = np.intersect1d(chans, commonchans) # values in both
if len(diffchans) > 0:
print('WARNING: ignored chans %r not common to all spikes' % list(diffchans))
return commonchans, chanslist
def get_Xhash(self, kind, sids, tis, chans, npcsperchan, norm):
"""Return MD5 hex digest of args, for uniquely identifying the matrix resulting
from dimension reduction of spike data"""
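        # sids, tis and chans are assumed to be numpy arrays here; h.update() hashes
        # their raw bytes, so an identical selection always maps to the same key in the
        # self.X cache.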
h = hashlib.md5()
h.update(kind.encode())
h.update(sids)
h.update(tis)
h.update(chans)
if kind == 'ICA': # consider npcsperchan only if doing ICA
h.update(str(npcsperchan).encode())
h.update(str(norm).encode())
return h.hexdigest()
def create_neuron(self, id=None, inserti=None):
"""Create and return a new Neuron with a unique ID"""
        if id is None:
id = self.nextnid
if id in self.neurons:
raise RuntimeError('Neuron %d already exists' % id)
id = int(id) # get rid of numpy ints
neuron = Neuron(self, id)
# add neuron to self
self.neurons[neuron.id] = neuron
        if inserti is None:
self.norder.append(neuron.id)
else:
self.norder.insert(inserti, neuron.id)
return neuron
def remove_neuron(self, id):
try:
del self.neurons[id] # may already be removed due to recursive call
del self.clusters[id]
self.norder.remove(id)
except (KeyError, ValueError):
pass
def shift(self, sids, nt):
"""Shift sid waveforms by nt timepoints: -ve shifts waveforms left, +ve shifts right.
For speed, pad waveforms with edge values at the appropriate end"""
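        # Sketch of the intended padding (assuming core.shiftpad pads with edge values
        # as described above): a single-chan waveform [a, b, c, d] becomes [a, a, a, b]
        # for nt=+2 (shifted right) and [c, d, d, d] for nt=-2 (shifted left).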
spikes = self.spikes
wd = self.wavedata
for sid in sids: # maybe there's a more efficient way than iterating over sids
core.shiftpad(wd[sid], nt) # modifies wd in-place
# update spike parameters:
dt = intround(nt * self.tres) # amount of time to shift by, signed, in us
# so we can later reload the wavedata accurately, shifting the waveform right and
# padding it on its left requires decrementing the associated timepoints
# (and vice versa)
spikes['t'][sids] -= dt
spikes['t0'][sids] -= dt
spikes['t1'][sids] -= dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Opposite sign wrt timepoints above, referencing within
# wavedata:
spikes['tis'][sids] = spikes['tis'][sids] + nt
# this in-place operation raises a TypeError in numpy 1.11.2, something related to
# subtracting an int from an unsigned int:
#spikes['tis'][sid] += nt
# caller should treat all sids as dirty
'''
# replaced by util.alignbest_cy():
def alignbest(self, sids, tis, chans):
"""Align all sids between tis on chans by best fit according to mean squared error.
chans are assumed to be a subset of channels of sids. Return sids
that were actually moved and therefore need to be marked as dirty"""
spikes = self.spikes
nspikes = len(sids)
nchans = len(chans)
wd = self.wavedata
nt = wd.shape[2] # num timepoints in each waveform
ti0, ti1 = tis
subnt = ti1 - ti0 # num timepoints to slice from each waveform
# TODO: make maxshift a f'n of interpolation factor
maxshift = 2 # shift +/- this many timepoints
subntdiv2 = subnt // 2
#print('subntdiv2 on either side of t=0: %d' % subntdiv2)
if subntdiv2 < maxshift:
raise ValueError("Selected waveform duration too short")
#maxshiftus = maxshift * self.stream.tres
# NOTE: in this case, it may be faster to keep shifts and sti0s and sti1s as lists
# of ints instead of np int arrays, maybe because their values are faster to iterate
# over or index with in python loops and lists:
shifts = range(-maxshift, maxshift+1) # from -maxshift to maxshift, inclusive
nshifts = len(shifts)
sti0s = [ ti0+shifti for shifti in range(nshifts) ] # shifted ti0 values
sti1s = [ ti1+shifti for shifti in range(nshifts) ] # shifted ti1 values
sti0ssti1s = zip(sti0s, sti1s)
print("Padding waveforms with up to +/- %d points of fake data" % maxshift)
# not worth subsampling here while calculating meandata, since all this
# stuff in this loop is needed in the shift loop below
subsd = np.zeros((nspikes, nchans, subnt), dtype=wd.dtype) # subset of spike data
spikechanis = np.zeros((nspikes, nchans), dtype=np.int64)
t0 = time.time()
for sidi, sid in enumerate(sids):
spike = spikes[sid]
nspikechans = spike['nchans']
spikechans = spike['chans'][:nspikechans]
spikechanis[sidi] = spikechans.searchsorted(chans)
subsd[sidi] = wd[sid, spikechanis[sidi], ti0:ti1]
print('Mean prep loop for best shift took %.3f sec' % (time.time()-t0))
t0 = time.time()
meandata = subsd.mean(axis=0) # float64
print('Mean for best shift took %.3f sec' % (time.time()-t0))
# choose best shifted waveform for each spike
# widesd holds current spike data plus padding on either side
# to allow for full width slicing for all time shifts:
maxnchans = spikes['nchans'].max() # of all spikes in sort
widesd = np.zeros((maxnchans, maxshift+nt+maxshift), dtype=wd.dtype)
shiftedsubsd = subsd.copy() # init
tempsubshifts = np.zeros((nshifts, nchans, subnt), dtype=wd.dtype)
dirtysids = []
t0 = time.time()
for sidi, sid in enumerate(sids):
# for speed, instead of adding real data, pad start and end with fake values
chanis = spikechanis[sidi]
sd = wd[sid] # sid's spike data
widesd[:, maxshift:-maxshift] = sd # 2D
widesd[:, :maxshift] = sd[:, 0, None] # pad start with first point per chan
widesd[:, -maxshift:] = sd[:, -1, None] # pad end with last point per chan
wideshortsd = widesd[chanis] # sid's padded spike data on chanis, 2D
# keep this inner loop as fast as possible:
for shifti, (sti0, sti1) in enumerate(sti0ssti1s):
tempsubshifts[shifti] = wideshortsd[:, sti0:sti1] # len: subnt
errors = tempsubshifts - meandata # (nshifts, nchans, subnt) - (nchans, subnt)
# get sum squared errors by taking sum across highest two dims - for purpose
# of error comparison, don't need to take mean or square root. Also, order
# of summation along axes doesn't matter, as long as it's done on the highest two:
sserrors = (errors**2).sum(axis=2).sum(axis=1) # nshifts long
bestshifti = sserrors.argmin()
bestshift = shifts[bestshifti]
if bestshift != 0: # no need to update sort.wavedata[sid] if there's no shift
# update time values:
dt = bestshift * self.tres # time to shift by, signed, in us
spikes['t'][sid] += dt # should remain halfway between t0 and t1
spikes['t0'][sid] += dt
spikes['t1'][sid] += dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Opposite sign, referencing within wavedata:
spikes['tis'][sid] -= bestshift
# update sort.wavedata
wd[sid] = widesd[:, bestshifti:bestshifti+nt]
shiftedsubsd[sidi] = tempsubshifts[bestshifti]
dirtysids.append(sid) # mark sid as dirty
print('Shifting loop took %.3f sec' % (time.time()-t0))
AD2uV = self.converter.AD2uV
stdevbefore = AD2uV(subsd.std(axis=0).mean())
stdevafter = AD2uV(shiftedsubsd.std(axis=0).mean())
print('stdev went from %.3f to %.3f uV' % (stdevbefore, stdevafter))
return dirtysids
'''
def alignminmax(self, sids, to):
"""Align sids by their min or max. Return those that were actually moved
and therefore need to be marked as dirty"""
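        # Only spikes currently aligned to the "wrong" peak (e.g. to their max when
        # to='min') are realigned: their t, t0, t1 and tis are shifted by the time
        # between their two peaks, and their aligni is flipped.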
if not self.stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
V0s = spikes['V0'][sids]
V1s = spikes['V1'][sids]
Vss = np.column_stack((V0s, V1s))
alignis = spikes['aligni'][sids]
b = np.column_stack((alignis==0, alignis==1)) # 2D boolean array
if to == 'min':
i = Vss[b] > 0 # indices into sids of spikes aligned to the max peak
elif to == 'max':
i = Vss[b] < 0 # indices into sids of spikes aligned to the min peak
else:
raise ValueError('Unknown to %r' % to)
sids = sids[i] # sids that need realigning
nspikes = len(sids)
print("Realigning %d spikes" % nspikes)
if nspikes == 0: # nothing to do
return [] # no sids to mark as dirty
multichantis = spikes['tis'][sids] # nspikes x nchans x 2 arr
chanis = spikes['chani'][sids] # nspikes arr of max chanis
# peak tis on max chan of each spike, convert from uint8 to int32 for safe math
tis = np.int32(multichantis[np.arange(nspikes), chanis]) # nspikes x 2 arr
# NOTE: tis aren't always in temporal order!
dpeaktis = tis[:, 1] - tis[:, 0] # could be +ve or -ve
dpeaks = spikes['dt'][sids] # stored as +ve
# for each spike, decide whether to add or subtract dpeak to/from its temporal values
ordered = dpeaktis > 0 # in temporal order
reversed = dpeaktis < 0 # in reversed temporal order
alignis = spikes['aligni'][sids]
alignis0 = alignis == 0
alignis1 = alignis == 1
dpeaki = np.zeros(nspikes, dtype=int)
# add dpeak to temporal values to align to later peak
dpeaki[ordered & alignis0 | reversed & alignis1] = 1
        # subtract dpeak from temporal values to align to earlier peak
dpeaki[ordered & alignis1 | reversed & alignis0] = -1
# upcast aligni from 1 byte to an int before doing arithmetic on it:
#dalignis = -np.int32(alignis)*2 + 1
dts = dpeaki * dpeaks
dtis = -dpeaki * abs(dpeaktis)
# shift values
spikes['t'][sids] += dts
spikes['t0'][sids] += dts
spikes['t1'][sids] += dts
spikes['tis'][sids] = spikes['tis'][sids] + dtis[:, None, None] # update wrt new t0i
spikes['aligni'][sids[alignis0]] = 1
spikes['aligni'][sids[alignis1]] = 0
# update wavedata for each shifted spike
self.reload_spikes(sids)
return sids # mark all sids as dirty
def choose_new_meanchans(self, sids):
"""Get mean waveform of all sids, then find the mean's chan with max Vpp, then
choose det.maxnchansperspike channels around that maxchan.
Return meanchans, furthestchan, and furthestchani"""
print('Choosing new channel set for all selected spikes')
det = self.detector
meanwave = self.get_mean_wave(sids)
# mean chan with max Vpp:
maxchan = meanwave.chans[meanwave.data.ptp(axis=1).argmax()]
maxchani = det.chans.searchsorted(maxchan)
distances = det.dm.data[maxchani]
# keep the maxnchansperspike closest chans to maxchan, including maxchan:
chanis = distances.argsort()[:det.maxnchansperspike]
meanchans = det.chans[chanis]
meanchans.sort() # keep them sorted
print('meanchans: %r' % list(meanchans))
furthestchan = det.chans[chanis[-1]]
print('furthestchan: %d' % furthestchan)
furthestchani = meanchans.searchsorted(furthestchan)
# sanity checks:
assert len(meanchans) == det.maxnchansperspike
assert maxchan in meanchans
return meanchans, furthestchan, furthestchani
def reload_spikes(self, sids, usemeanchans=False):
"""Update wavedata of designated spikes from stream. Optionally fix incorrect
time values from .sort 0.3 files. Optionally choose new set of channels for all
sids based on the chans closest to the mean of the sids. It's the caller's
responsibility to mark sids as dirty and trigger resaving of .wave file"""
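        # Overall strategy: sort sids by time, break them into temporally close groups
        # (ISIs < MAXGROUPISI, each group spanning <= MAXGROUPDT), load the union of
        # their chans from the stream once per group, then slice each spike's waveform
        # out of that group's data.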
## TODO: add findmaxchan=False and recenteronmaxchan=False kwargs
nsids = len(sids)
print('(Re)loading %d spikes' % nsids)
stream = self.stream
if not stream.is_open():
raise RuntimeError("No open stream to reload spikes from")
spikes = self.spikes
det = self.detector
ver_lte_03 = float(self.__version__) <= 0.3
if ver_lte_03:
print('Fixing potentially incorrect time values during spike reloading')
nfixed = 0
treload = time.time()
if usemeanchans:
if ver_lte_03:
raise RuntimeError("Best not to choose new chans from mean until after "
"converting to .sort >= 0.4")
meanchans, furthestchan, furthestchani = self.choose_new_meanchans(sids)
nmeanchans = len(meanchans)
# split up sids into groups efficient for loading from stream:
ts = spikes[sids]['t'] # noncontig, not a copy
# ensure they're in temporal order:
if not (np.diff(ts) >= 0).all():
print("Selected sids aren't in temporal order, sorting by time...")
tsis = ts.argsort()
sids = sids[tsis]
print("Done sorting sids by time")
# break up spikes by ISIs >= MAXGROUPISI:
splitis = np.where(np.diff(ts) >= MAXGROUPISI)[0] + 1
groups = np.split(sids, splitis)
# limit each group of sids to no more than MAXGROUPDT:
groupi = 0
while groupi < len(groups):
group = groups[groupi] # group of sids all with ISIs < MAXGROUPISI
## TODO: not a copy: is this the optimal way to get the times in this case?
relts = spikes[group]['t'] - spikes[group[0]]['t']
splitis = np.where(np.diff(relts // MAXGROUPDT) > 0)[0] + 1
nsubgroups = len(splitis) + 1
if nsubgroups > 1:
# del original group, replace with subgroups
del groups[groupi]
subgroups = np.split(group, splitis)
groups[groupi:groupi] = subgroups
groupi += len(subgroups)
else:
groupi += 1
print('ngroups: %d' % len(groups))
# process each group:
sidi = 0 # init sid index across all groups, used as status counter
for groupi, group in enumerate(groups):
printflush('<%d>' % groupi, end='')
assert len(group) > 0 # otherwise something went wrong above
t0 = spikes[group[0]]['t0']
t1 = spikes[group[-1]]['t1']
if ver_lte_03:
# load a little extra, in case we need to reload misaligned first and/or
# last spike in this group
t0 -= 5000 # -5 ms
t1 += 5000 # +5 ms
"""
Find union of chans of sids in this group, ask Stream for only those such that no
unnecessary resampling takes place on unneeded chans. Note that this doesn't make
a difference when CAR is enabled in the stream, because the full set of enabled
chans have to be maintained in Stream.__call__ until the very end. Don't bother
cutting out the correct nchans for each sid. At worst, chan 0 (the "empty" chans
array value) will be unnecessarily added to unionchans, and we'll retrieve one
extra chan when creating tempwave, which will then later be discarded:
"""
unionchans = np.unique(spikes['chans'][group])
if usemeanchans:
# now that we have the original unionchans of this group,
# update this group's spikes array entries with meanchans:
spikes['nchans'][group] = nmeanchans
# we're using the max num chans, so assign the full array:
spikes['chans'][group] = meanchans
# now update unionchans as well:
unionchans = np.unique(np.hstack((unionchans, meanchans)))
if 0 not in stream.chans: # if chan 0 is disabled in stream
# remove 0 from unionchans, otherwise an error would be raised when
# calling stream()
unionchans = unionchans[unionchans != 0]
# load and resample only what's needed for this group:
tempwave = stream(t0, t1, unionchans)
# slice out each spike's reloaded data from tempwave:
for sid in group:
# print status:
if sidi % 10000 == 0:
printflush(sidi, end='')
elif sidi % 1000 == 0:
printflush('.', end='')
if usemeanchans: # already checked above that ver_lte_03 == False
# this spike's chans have been set to meanchans, now
# check that each spike's maxchan is in meanchans:
chan = spikes[sid]['chan']
if chan not in meanchans:
# replace furthest chan with spike's maxchan:
print("spike %d: replacing furthestchan %d with spike's maxchan %d"
% (sid, furthestchan, chan))
nchans = spikes[sid]['nchans']
chans = spikes[sid]['chans'][:nchans]
# replace furthest chan with max chan, modifies spikes array in-place:
chans[furthestchani] = chan
# make sure chans remain sorted:
chans.sort()
# this isn't necessary, because all the above was in-place:
#spikes['chans'][sid][:nchans] = chans
spike = spikes[sid]
nchans = spike['nchans']
chans = spike['chans'][:nchans]
rd = tempwave[spike['t0']:spike['t1']][chans].data # reloaded data
if ver_lte_03: # fix potentially incorrect spike tis
result = self.reload_spike_ver_lte_03(sid, nchans, tempwave, rd)
if result == None:
sidi += 1 # inc status counter
continue # rollwin2D won't work, skip to next sid
else:
rd, fixed = result
if fixed:
nfixed += 1
nt = rd.shape[1]
self.wavedata[sid, :nchans, :nt] = rd # update wavedata
sidi += 1 # inc status counter
print()
if ver_lte_03:
print('Fixed time values of %d spikes' % nfixed)
print('(Re)loaded %d spikes, took %.3f sec' % (len(sids), time.time()-treload))
def reload_spike_ver_lte_03(self, sid, nchans, tempwave, rd):
"""In sort.__version__ <= 0.3, t, t0, t1, and tis were not updated
during alignbest() calls. To fix this, load new data with old potentially
incorrect t0 and t1 values, and compare this new data to existing old data
in wavedata array. Find where the non-repeating parts of the old data fits
into the new, and calculate the correction needed to fix the time values.
Finally, reload new data according to these corrected time values."""
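        # Matching sketch: rollwin2D(rd, width) yields every width-timepoint window of
        # the reloaded data rd; the window that equals the stripped old data od locates
        # od within rd, and its offset relative to lefti is the timepoint correction dnt.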
        #print('Reloading sid from ver_lte_03: %d' % sid)
        spikes = self.spikes # needed for fixing time values below
        od = self.wavedata[sid, :nchans] # old data
# indices that strip const values from left and right ends:
lefti, righti = lrrep2Darrstripis(od)
od = od[:, lefti:righti] # stripped old data
# reloaded data rd uses old incorrect t0 and t1, but they should be
# wide enough to encompass the non-repeating parts of the old data
width = od.shape[1] # rolling window width
if not width <= rd.shape[1]:
print('') # newline
print("WARNING: od.shape[1]=%d > rd.shape[1]=%d for sid %d" %
(od.shape[1], rd.shape[1], sid))
#import pdb; pdb.set_trace()
return
odinndis = np.where((rollwin2D(rd, width) == od).all(axis=1).all(axis=1))[0]
if len(odinndis) == 0: # no hits of old data in new
dnt = 0 # reload data based on current timepoints
elif len(odinndis) == 1: # exactly 1 hit of old data in new
odinndi = odinndis[0] # pull it out
dnt = odinndi - lefti # num timepoints to correct by, signed
else:
raise RuntimeError("Multiple hits of old data in new, don't know "
"how to reload spike %d" % sid)
newrd, fixed = rd, False
if dnt != 0:
dt = intround(dnt * self.tres) # time to correct by, signed, in us
spikes['t'][sid] += dt # should remain halfway between t0 and t1
spikes['t0'][sid] += dt
spikes['t1'][sid] += dt
# might result in some out of bounds tis because the original peaks
# have shifted off the ends. Use opposite sign because we're
# referencing within wavedata:
# in versions <= 0.3, 'tis' were named 'phasetis':
spikes['phasetis'][sid] = spikes['phasetis'][sid] - dnt
            spike = spikes[sid]
            chans = spike['chans'][:nchans] # this spike's chans, for reslicing
            # reslice tempwave again now that t0 and t1 have changed
            newrd = tempwave[spike['t0']:spike['t1']][chans].data
fixed = True
#printflush('F', end='')
return newrd, fixed
def reload_spikes_and_templates(self, sids, usemeanchans=False):
self.reload_spikes(sids, usemeanchans=usemeanchans)
# update neuron templates:
unids = np.unique(self.spikes['nid'][sids])
unids = unids[unids != 0] # exclude junk cluster, which doesn't have a neuron
neurons = [ self.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
def init_spike_alignment(self):
"""Set initial spike alignment points according to alignment points of each
spike's neuron"""
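        # For each neuron, find the min and max timepoint index of its mean waveform on
        # every chan, and use as the alignment point (aligni) whichever of the two
        # varies least across chans; member spikes then inherit their neuron's tis and
        # aligni.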
print('Setting initial spike alignment points')
ntis, nalignis = {}, {} # tis and aligni derived from each neuron's mean waveform
for neuron in self.neurons.values():
nwave = neuron.get_wave() # update and return mean waveform
mintis = nwave.data.argmin(axis=1)
maxtis = nwave.data.argmax(axis=1)
ntis[neuron.id] = np.column_stack([mintis, maxtis])
# choose aligni with least variance:
nalignis[neuron.id] = np.argmin([mintis.std(), maxtis.std()])
AD2uV = self.converter.AD2uV
for s, wd in zip(self.spikes, self.wavedata):
sid = s['id']
# print out progress on a regular basis:
if sid % 100000 == 0:
printflush(sid, end='')
elif sid % 10000 == 0:
printflush('.', end='')
nid = s['nid']
#chan = s['chan']
nchans = s['nchans']
chans = s['chans'][:nchans]
neuronchans = self.neurons[nid].wave.chans
assert (chans == neuronchans).all()
s['tis'][:nchans] = ntis[nid] # set according to its neuron, wrt t0i=0
s['aligni'] = nalignis[nid] # set according to its neuron
maxchani = s['chani']
t0i, t1i = int(s['tis'][maxchani, 0]), int(s['tis'][maxchani, 1])
s['dt'] = abs(t1i - t0i) / self.sampfreq * 1e6 # us
# note that V0 and V1 might not be of opposite sign, because tis are derived
# from mean neuron waveform, not from each individual spike:
            s['V0'], s['V1'] = AD2uV(wd[maxchani, t0i]), AD2uV(wd[maxchani, t1i]) # uV
s['Vpp'] = abs(s['V1'] - s['V0']) # uV
print()
def spatially_localize_spikes(self, sortwin, method='fit'):
"""Assuming that wavedata have been extracted and neuron mean waveforms calculated,
find tis and perform spatial localization of every spike in self"""
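        # method='fit' fits the extractor's spatial model f to each spike's per-chan Vpp
        # weights; method='mean' just takes the Vpp-weighted spatial mean of the chan
        # positions with an ad-hoc sigma.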
det = self.detector
weights2f = self.extractor.weights2spatial
weights2spatialmean = self.extractor.weights2spatialmean
f = self.extractor.f
nreject = 0 # number spikes rejected during spatial localization
print('Running spatial localization on all %d spikes' % self.nspikes)
        tstart = time.time()
        ## TODO: can this be multithreaded/processed?
for s, wd in zip(self.spikes, self.wavedata):
# Get Vpp at each inclchan's tis, use as spatial weights:
# see core.rowtake() or util.rowtake_cy() for indexing explanation:
sid = s['id']
# print out progress on a regular basis:
if sid % 10000 == 0:
printflush(sid, end='')
elif sid % 1000 == 0:
printflush('.', end='')
chan = s['chan']
nchans = s['nchans']
chans = s['chans'][:nchans]
maxchani = s['chani']
chanis = det.chans.searchsorted(chans)
w = np.float32(wd[np.arange(s['nchans'])[:, None], s['tis'][:nchans]]) # nchans x 2
w = abs(w).sum(axis=1) # Vpp for each chan, measured at t0i and t1i
x = det.siteloc[chanis, 0] # 1D array (row)
y = det.siteloc[chanis, 1]
if method == 'fit':
# localize by fitting extractor.f function to wavedata
params = weights2f(f, w, x, y, maxchani)
elif method == 'mean':
# set localization to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# a very ad-hoc guess for spatial sigma:
sx = 2 * dist((x0, y0), self.probe.SiteLoc[chan])
params = x0, y0, sx, sx
            else:
                raise ValueError('Unknown method %r' % method)
            if params is None: # presumably a non-localizable many-channel noise event
#printflush('X', end='') # to indicate a rejected spike
if DEBUG:
spiket = intround(s['t']) # nearest us
det.log("Reject spike %d at t=%d based on fit params" % (sid, spiket))
neuron = self.neurons[s['nid']]
# remove from its neuron, add to unsorted list of spikes:
sortwin.MoveSpikes2List(neuron, [sid], update=False)
# manually set localization params to Vpp-weighted spatial mean and 0 sigma:
x0, y0 = weights2spatialmean(w, x, y)
# set sigma to 0 um, and then later round lockr up to 1 um so that only one
# raster tick shows up for each rejected spike, reducing clutter
params = x0, y0, 0, 0
nreject += 1
# Save spatial fit params, and "lockout" only the channels within lockrx*sx
# of the fit spatial location of the spike, up to a max of inclr. "Lockout"
# in this case only refers to which channels are highlighted with a raster tick
# for each spike:
s['x0'], s['y0'], s['sx'], s['sy'] = params
x0, y0 = s['x0'], s['y0']
# lockout radius for this spike:
lockr = min(det.lockrx*s['sx'], det.inclr) # in um
lockr = max(lockr, 1) # at least 1 um, so at least the maxchan gets a tick
# test y coords of chans in y array, ylockchaniis can be used to index
# into x, y and chans:
ylockchaniis, = np.where(np.abs(y - y0) <= lockr) # convert bool arr to int
# test Euclid distance from x0, y0 for each ylockchani:
lockchaniis = ylockchaniis.copy()
for ylockchanii in ylockchaniis:
if dist((x[ylockchanii], y[ylockchanii]), (x0, y0)) > lockr:
# Euclidean distance is too great, remove ylockchanii from lockchaniis:
lockchaniis = lockchaniis[lockchaniis != ylockchanii]
lockchans = chans[lockchaniis]
nlockchans = len(lockchans)
s['lockchans'][:nlockchans], s['nlockchans'] = lockchans, nlockchans
        print('Spatial localization of spikes took %.3f s' % (time.time() - tstart))
return nreject
'''
def get_component_matrix(self, dims=None, weighting=None):
"""Convert spike param matrix into pca/ica data for clustering"""
import mdp # can't delay this any longer
X = self.get_param_matrix(dims=dims)
if weighting == None:
return X
if weighting.lower() == 'ica':
node = mdp.nodes.FastICANode()
elif weighting.lower() == 'pca':
node = mdp.nodes.PCANode()
else:
raise ValueError, 'unknown weighting %r' % weighting
node.train(X)
features = node.execute(X) # returns all available components
#self.node = node
#self.weighting = weighting
#self.features = features
return features
def get_ids(self, cids, spikes):
"""Convert a list of cluster ids into 2 dicts: n2sids maps neuron IDs to
spike IDs; s2nids maps spike IDs to neuron IDs"""
cids = np.asarray(cids)
cids = cids - cids.min() # make sure cluster IDs are 0-based
uniquecids = set(cids)
nclusters = len(uniquecids)
# neuron ID to spike IDs (plural) mapping
n2sids = dict(zip(uniquecids, [ [] for i in range(nclusters) ]))
s2nids = {} # spike ID to neuron ID mapping
for spike, nid in zip(spikes, cids):
s2nids[spike['id']] = nid
n2sids[nid].append(spike['id'])
return n2sids, s2nids
def write_spc_input(self):
"""Generate input data file to SPC"""
X = self.get_component_matrix()
# write to space-delimited .dat file. Each row is a spike, each column a param
spykedir = os.path.dirname(__file__)
dt = str(datetime.datetime.now())
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
self.spcdatfname = os.path.join(spykedir, 'spc', dt+'.dat')
# not sure why spc adds the dg_01 part:
self.spclabfname = os.path.join(spykedir, 'spc', dt+'.dg_01.lab')
f = open(self.spcdatfname, 'w')
for params in X: # write text data to file, one row at a time
params.tofile(f, sep=' ', format='%.6f')
f.write('\n')
f.close()
def parse_spc_lab_file(self, fname=None):
"""Parse output .lab file from SPC. Each row in the file is the assignment of each
spin (datapoint) to a cluster, one row per temperature datapoint. First column is
temperature run number (0-based). 2nd column is the temperature. All remaining
columns correspond to the datapoints in the order presented in the input .dat file.
Returns (Ts, cids)"""
#spikes = self.get_spikes_sortedby('id')
if fname == None:
defaultDir = r"C:\Documents and Settings\Administrator\Desktop\Charlie\From"
dlg = wx.FileDialog(None, message="Open SPC .lab file",
defaultDir=defaultDir, defaultFile='',
wildcard="All files (*.*)|*.*|.lab files (*.lab)|*.lab|",
style=wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
fname = dlg.GetPath()
dlg.Destroy()
data = np.loadtxt(fname, dtype=np.float32)
Ts = data[:, 1] # 2nd column
cids = np.int32(data[:, 2:]) # 3rd column on
print('Parsed %r' % fname)
return Ts, cids
def parse_charlies_output(self, fname=None):
if fname == None:
fname = (r'C:\Documents and Settings\Administrator\Desktop\Charlie\'
'From\2009-07-20\clustered_events_coiflet_T0.125.txt')
nids = np.loadtxt(fname, dtype=int) # one neuron id per spike
return nids
def write_spc_app_input(self):
"""Generate input data file to spc_app"""
spikes = self.get_spikes_sortedby('id')
X = self.get_component_matrix()
# write to tab-delimited data file. Each row is a param, each column a spike
# (this is the transpose of X)
# first row has labels "AFFX", "NAME", and then spike ids
# first col has labels "AFFX", and then param names
f = open(r'C:\home\mspacek\Desktop\Work\SPC\Weizmann\spc_app\spc_app_input.txt', 'w')
f.write('AFFX\tNAME\t')
for spike in spikes:
f.write('s%d\t' % spike['id'])
f.write('\n')
for parami, param in enumerate(['Vpp', 'dt', 'x0', 'y0', 'sx', 'sy', 'theta']):
f.write(param+'\t'+param+'\t')
for val in X[:, parami]:
f.write('%f\t' % val)
f.write('\n')
f.close()
def hcluster(self, t=1.0):
"""Hierarchically cluster self.spikes
TODO: consider doing multiple cluster runs. First, cluster by spatial location (x0,
y0). Then split those clusters up by Vpp. Then those by spatial distrib (sy/sx,
theta), then by temporal distrib (dt, s1, s2). This will ensure that the lousier
params will only be considered after the best ones already have, and therefore that
you start off with pretty good clusters that are then only slightly refined using
the lousy params
"""
spikes = self.get_spikes_sortedby('id')
X = self.get_component_matrix()
print(X)
# try 'weighted' or 'average' with 'mahalanobis'
cids = fclusterdata(X, t=t, method='single', metric='euclidean')
n2sids, s2nids = self.get_ids(cids, spikes)
return n2sids
def export2Charlie(self, fname='spike_data', onlymaxchan=False, nchans=3, npoints=32):
"""Export spike data to a text file, one spike per row.
Columns are x0, y0, followed by most prominent npoints datapoints
(1/4, 3/4 wrt spike time) of each nearest nchans. This is to
give to Charlie to do WPD and SPC on"""
if onlymaxchan:
nchans = 1
assert np.log2(npoints) % 1 == 0, 'npoints is not a power of 2'
# get ti - time index each spike is assumed to be centered on
self.spikes[0].update_wave(self.stream) # make sure it has a wave
ti = intround(self.spikes[0].wave.data.shape[-1] / 4) # 13 for 50 kHz, 6 for 25 kHz
dims = self.nspikes, 2+nchans*npoints
output = np.empty(dims, dtype=np.float32)
dm = self.detector.dm
chanis = np.arange(len(dm.data))
coords = np.asarray(dm.coords)
xcoords = coords[:, 0]
ycoords = coords[:, 1]
sids = list(self.spikes) # self.spikes is a dict!
sids.sort()
for sid in sids:
spike = self.spikes[sid]
chani = spike.chani # max chani
x0, y0 = spike.x0, spike.y0
if onlymaxchan:
nearestchanis = np.asarray([chani])
else:
# find closest chans to x0, y0
d2s = (xcoords - x0)**2 + (ycoords - y0)**2 # squared distances
sortis = d2s.argsort()
nearestchanis = chanis[sortis][0:nchans] # pick the first nchan nearest chans
if chani not in nearestchanis:
print("WARNING: max chani %d is not among the %d chanis nearest "
"(x0, y0) = (%.1f, %.1f) for spike %d at t=%d"
% (chani, nchans, x0, y0, sid, spike.t))
if spike.wave.data is None:
spike.update_wave(self.stream)
row = [x0, y0]
for chani in nearestchanis:
chan = dm.chans[chani] # dereference
try:
data = spike.wave[chan].data[0] # pull out singleton dimension
except IndexError: # empty array
data = np.zeros(data.shape[-1], data.dtype)
row.extend(data[ti-npoints/4:ti+npoints*3/4])
output[sid] = row
dt = str(datetime.datetime.now())
dt = dt.split('.')[0] # ditch the us
dt = dt.replace(' ', '_')
dt = dt.replace(':', '.')
fname += '.' + dt + '.txt'
np.savetxt(fname, output, fmt='%.1f', delimiter=' ')
def match(self, templates=None, weighting='signal', sort=True):
"""Match templates to all .spikes with nearby maxchans,
save error values to respective templates.
Note: slowest step by far is loading in the wave data from disk.
(First match is slow, subsequent ones are ~ 15X faster.)
Unless something's done about that in advance, don't bother optimizing here much.
Right now, once waves are loaded, performance is roughly 20000 matches/sec
TODO: Nick's alternative to gaussian distance weighting: have two templates: a mean
template, and an stdev template, and weight the error between each matched
spike and the mean on each chan at each timepoint by the corresponding stdev value
(divide the error by the stdev, so that timepoints with low stdev are more sensitive
to error)
TODO: looks like I still need to make things more nonlinear - errors at high signal
values aren't penalized enough, while errors at small signal values are penalized
too much. Try cubing both signals, then taking sum(err**2)
DONE: maybe even better, instead of doing an elaborate cubing of signal, followed by
a rather elaborate gaussian spatiotemporal weighting of errors, just take difference
of signals, and weight the error according to the abs(template_signal) at each point
in time and across chans. That way, error in parts of the signal far from zero are
considered more important than deviance of perhaps similar absolute value for signal
close to zero
"""
# None defaults to matching all templates:
templates = templates or self.templates.values()
sys.stdout.write('matching')
t0 = time.time()
nspikes = len(self.spikes)
dm = self.detector.dm
for template in templates:
template.err = [] # overwrite any existing .err attrib
tw = template.tw
templatewave = template.wave[template.chans] # pull out template's enabled chans
#stdev = template.get_stdev()[template.chans] # pull out template's enabled chans
# replace any 0s with 1s - TODO: what's best way to avoid singularities?:
#stdev[stdev == 0] = 1
# Gaussian weighting in space and/or time:
weights = template.get_weights(weighting=weighting, sstdev=self.detector.slock/2,
tstdev=self.detector.tlock/2)
for spike in self.spikes.values():
# check if spike.maxchan is outside some minimum distance from template.maxchan
if dm[template.maxchan, spike.maxchan] > MAXCHANTOLERANCE: # um
continue # don't even bother
if spike.wave.data is None or template.tw != TW: # make sure their data line up
spike.update_wave(tw) # this slows things down a lot, but is necessary
# slice template's enabled chans out of spike, calculate sum of
# squared weighted error
# first impression is that dividing by stdev makes separation worse, not better
# low stdev means more sensitive to error:
#err = (templatewave.data - spike.wave[template.chans].data) / stdev * weights
# pull out template's enabled chans from spike:
spikewave = spike.wave[template.chans]
if weighting == 'signal':
tsdata = np.asarray([templatewave.data, spikewave.data])
# take elementwise max of abs of template and spike data:
weights = np.abs(tsdata).max(axis=0)
err = (templatewave.data - spikewave.data) * weights # weighted error
err = (err**2).sum(axis=None) # sum of squared weighted error
template.err.append((spike.id, intround(err)))
template.err = np.asarray(template.err, dtype=np.int64)
if sort and len(template.err) != 0:
i = template.err[:, 1].argsort() # row indices that sort by error
template.err = template.err[i]
sys.stdout.write('.')
print('\nmatch took %.3f sec' % (time.time()-t0))
'''
class Neuron(object):
"""A collection of spikes that have been deemed somehow, whether manually
or automatically, to have come from the same cell. A Neuron's waveform
is the mean of its member spikes"""
def __init__(self, sort, id=None):
self.sort = sort
self.id = id # neuron id
self.wave = WaveForm() # init to empty waveform
self.sids = np.array([], dtype=int) # indices of spikes that make up this neuron
# relative reference timestamp, here for symmetry with fellow spike rec
# (obj.t comes up sometimes):
self.t = 0
self.plt = None # Plot currently holding self
self.cluster = None
self.good = False # user can mark this neuron as "good" if so desired
#self.fname # not here, let's allow neurons to have spikes from different files?
def get_chans(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans # self.chans just refers to self.wave.chans
chans = property(get_chans)
def get_chan(self):
if self.wave.data is None:
self.update_wave()
return self.wave.chans[self.wave.data.ptp(axis=1).argmax()] # chan with max Vpp
chan = property(get_chan)
def get_nspikes(self):
return len(self.sids)
nspikes = property(get_nspikes)
def __getstate__(self):
"""Get object state for pickling"""
d = self.__dict__.copy()
# don't save any calculated PCs/ICs:
#d.pop('X', None)
#d.pop('Xhash', None)
# don't save plot self is assigned to, since that'll change anyway on unpickle
d['plt'] = None
return d
def get_wave(self):
"""Check for valid mean and std waveform before returning it"""
# many neuron waveforms saved in old .sort files won't have a wave.std field:
try:
self.wave.std
except AttributeError:
return self.update_wave()
if self.wave == None or self.wave.data is None or self.wave.std is None:
return self.update_wave()
else:
return self.wave # return existing waveform
def update_wave(self):
"""Update mean and std of self's waveform"""
sort = self.sort
spikes = sort.spikes
if len(self.sids) == 0: # no member spikes, perhaps I should be deleted?
raise RuntimeError("n%d has no spikes and its waveform can't be updated" % self.id)
meanwave = sort.get_mean_wave(self.sids, nid=self.id)
# update self's Waveform object
self.wave.data = meanwave.data
self.wave.std = meanwave.std
self.wave.ts = sort.twts.copy() # meanwave has no .ts, copy for clean jsonpickle
self.wave.chans = meanwave.chans
self.wave.tres = sort.tres # meanwave has no .tres
return self.wave
def __sub__(self, other):
"""Return difference array between self and other neurons' waveforms
on common channels"""
selfwavedata, otherwavedata = self.getCommonWaveData(other.chan, other.chans,
other.wave.data)
return selfwavedata - otherwavedata
def getCommonWaveData(self, otherchan, otherchans, otherwavedata):
"""Return waveform data common to self's chans and otherchans, while
requiring that both include the other's maxchan"""
chans = np.intersect1d(self.chans, otherchans, assume_unique=True)
if len(chans) == 0:
raise ValueError('No common chans')
if self.chan not in chans or otherchan not in chans:
raise ValueError("maxchans aren't part of common chans")
selfchanis = self.chans.searchsorted(chans)
otherchanis = otherchans.searchsorted(chans)
return self.wave.data[selfchanis], otherwavedata[otherchanis]
'''
def get_stdev(self):
"""Return 2D array of stddev of each timepoint of each chan of member spikes.
Assumes self.update_wave has already been called"""
data = []
# TODO: speed this up by pre-allocating memory and then filling in the array
for spike in self.spikes:
data.append(spike.wave.data) # collect spike's data
stdev = np.asarray(data).std(axis=0)
return stdev
def get_weights(self, weighting=None, sstdev=None, tstdev=None):
"""Returns unity, spatial, temporal, or spatiotemporal Gaussian weights
for self's enabled chans in self.wave.data, given spatial and temporal
stdevs"""
nchans = len(self.wave.chans)
nt = len(self.wave.data[0]) # assume all chans have the same number of timepoints
if weighting == None:
weights = 1
elif weighting == 'spatial':
weights = self.get_gaussian_spatial_weights(sstdev) # vector
elif weighting == 'temporal':
weights = self.get_gaussian_temporal_weights(tstdev) # vector
elif weighting == 'spatiotemporal':
sweights = self.get_gaussian_spatial_weights(sstdev)
tweights = self.get_gaussian_temporal_weights(tstdev)
weights = np.outer(sweights, tweights) # matrix, outer product of the two
elif weighting == 'signal':
weights = None # this is handled by caller
#print('\nweights:\n%r' % weights)
return weights
def get_gaussian_spatial_weights(self, stdev):
"""Return a vector that weights self.chans according to a 2D gaussian
centered on self.maxchan with standard deviation stdev in um"""
g = Gaussian(mean=0, stdev=stdev)
# distances between maxchan and all enabled chans:
d = self.sort.detector.dm[self.maxchan, self.chans]
weights = g[d]
weights.shape = (-1, 1) # vertical vector with nchans rows, 1 column
return weights
def get_gaussian_temporal_weights(self, stdev):
"""Return a vector that weights timepoints in self's mean waveform
by a gaussian centered on t=0, with standard deviation stdev in us"""
g = Gaussian(mean=0, stdev=stdev)
ts = self.wave.ts # template mean timepoints relative to t=0 spike time
weights = g[ts] # horizontal vector with 1 row, nt timepoints
return weights
'''
class PTCSHeader(object):
"""
Polytrode clustered spikes file header:
formatversion: int64 (currently version 3)
ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
descr: ndescrbytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
nneurons: uint64 (number of neurons)
nspikes: uint64 (total number of spikes)
nsamplebytes: uint64 (number of bytes per template waveform sample)
samplerate: uint64 (Hz)
npttypebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
pttype: npttypebytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
nptchans: uint64 (total num chans in polytrode)
chanpos: nptchans * 2 * float64
(array of (x, y) positions, in um, relative to top of polytrode,
indexed by 0-based channel IDs)
nsrcfnamebytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
srcfname: nsrcfnamebytes of ASCII text
(source file name, probably .srf, padded with null bytes if needed for
8 byte alignment)
datetime: float64
(absolute datetime corresponding to t=0 us timestamp, stored as days since
epoch: December 30, 1899 at 00:00)
ndatetimestrbytes: uint64
datetimestr: ndatetimestrbytes of ASCII text
        (human readable string representation of datetime, preferably ISO 8601,
padded with null bytes if needed for 8 byte alignment)
"""
FORMATVERSION = 3 # overall .ptcs file format version, not header format version
def __init__(self, sort, sortpath, stream, nneurons, nspikes, nsamplebytes,
fullfname, exportdt, user='', notes=''):
self.sort = sort
self.stream = stream
self.nneurons = nneurons
self.nspikes = nspikes
self.nsamplebytes = nsamplebytes
homelessfullfname = lstrip(fullfname, os.path.expanduser('~'))
sortfname = sort.fname
sortfullfname = os.path.join(sortpath, sortfname)
sortfmoddt = str(datetime.datetime.fromtimestamp(os.path.getmtime(sortfullfname)))
sortfmoddt = sortfmoddt.split('.')[0] # ditch the us
sortfsize = os.path.getsize(sortfullfname) # in bytes
d = {'file_type': '.ptcs (polytrode clustered spikes) file',
'original_fname': homelessfullfname, 'export_time': exportdt,
'sort': {'fname': sortfname, 'path': sortpath,
'fmtime': sortfmoddt, 'fsize': sortfsize},
'user': user, 'notes': notes}
descr = str(d)
self.descr = pad(descr, align=8)
self.srcfname = pad(lstrip(stream.fname, '../'), align=8)
self.pttype = pad(stream.probe.name, align=8)
self.dt = stream.datetime
self.dtstr = pad(self.dt.isoformat(), align=8)
def write(self, f):
s = self.sort
np.int64(self.FORMATVERSION).tofile(f) # formatversion
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr
np.uint64(self.nneurons).tofile(f) # nneurons
np.uint64(self.nspikes).tofile(f) # nspikes
np.uint64(self.nsamplebytes).tofile(f) # nsamplebytes
np.uint64(s.sampfreq).tofile(f) # samplerate
np.uint64(len(self.pttype)).tofile(f) # npttypebytes
f.write(self.pttype) # pttype
np.uint64(s.stream.probe.nchans).tofile(f) # nptchans
np.float64(s.stream.probe.siteloc_arr()).tofile(f) # chanpos
np.uint64(len(self.srcfname)).tofile(f) # nsrcfnamebytes
f.write(self.srcfname) # srcfname
np.float64(td2days(self.dt - EPOCH)).tofile(f) # datetime (in days)
np.uint64(len(self.dtstr)).tofile(f) # ndatetimestrbytes
f.write(self.dtstr)
class PTCSNeuronRecord(object):
"""
Polytrode clustered spikes file neuron record:
nid: int64 (signed neuron id, could be -ve, could be non-contiguous with previous)
ndescrbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment, defaults to 0)
descr: ndescrbytes of ASCII text
(padded with null bytes if needed for 8 byte alignment)
clusterscore: float64
xpos: float64 (um)
ypos: float64 (um)
sigma: float64 (um) (Gaussian spatial sigma)
nchans: uint64 (num chans in template waveforms)
chanids: nchans * uint64 (0 based IDs of channels in template waveforms)
maxchanid: uint64 (0 based ID of max channel in template waveforms)
nt: uint64 (num timepoints per template waveform channel)
nwavedatabytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
wavedata: nwavedatabytes of nsamplebytes sized floats
(template waveform data, laid out as nchans * nt, in uV,
padded with null bytes if needed for 8 byte alignment)
nwavestdbytes: uint64 (nbytes, keep as multiple of 8 for nice alignment)
wavestd: nwavestdbytes of nsamplebytes sized floats
(template waveform standard deviation, laid out as nchans * nt, in uV,
padded with null bytes if needed for 8 byte alignment)
nspikes: uint64 (number of spikes in this neuron)
spike timestamps: nspikes * uint64 (us, should be sorted)
"""
def __init__(self, neuron, spikets=None, nsamplebytes=None, descr=''):
n = neuron
AD2uV = n.sort.converter.AD2uV
self.neuron = neuron
self.spikets = spikets # constrained to stream range, may be < neuron.sids
self.wavedtype = {2: np.float16, 4: np.float32, 8: np.float64}[nsamplebytes]
if n.wave.data is None or n.wave.std is None: # some may have never been displayed
n.update_wave()
# wavedata and wavestd are nchans * nt * nsamplebytes long:
self.wavedata = pad(self.wavedtype(AD2uV(n.wave.data)), align=8)
self.wavestd = pad(self.wavedtype(AD2uV(n.wave.std)), align=8)
self.descr = pad(descr, align=8)
def write(self, f):
n = self.neuron
np.int64(n.id).tofile(f) # nid
np.uint64(len(self.descr)).tofile(f) # ndescrbytes
f.write(self.descr) # descr, bytes
np.float64(np.nan).tofile(f) # clusterscore
np.float64(n.cluster.pos['x0']).tofile(f) # xpos (um)
np.float64(n.cluster.pos['y0']).tofile(f) # ypos (um)
np.float64(n.cluster.pos['sx']).tofile(f) # sigma (um)
np.uint64(len(n.wave.chans)).tofile(f) # nchans
np.uint64(n.wave.chans).tofile(f) # chanids
np.uint64(n.chan).tofile(f) # maxchanid
np.uint64(len(n.wave.ts)).tofile(f) # nt
np.uint64(self.wavedata.nbytes).tofile(f) # nwavedatabytes
self.wavedata.tofile(f) # wavedata
np.uint64(self.wavestd.nbytes).tofile(f) # nwavestdbytes
self.wavestd.tofile(f) # wavestd
np.uint64(len(self.spikets)).tofile(f) # nspikes
np.uint64(self.spikets).tofile(f) # spike timestamps (us)
class PanelScrollArea(QtGui.QScrollArea):
"""A scroll area for the spikesortpanel"""
def keyPressEvent(self, event):
key = event.key()
        # seems the ENTER key needs to be handled to directly call plot, unlike in sortwin
# where the event is passed on to be handled by the list widgets
if key in [Qt.Key_Enter, Qt.Key_Return]:
sortwin = self.topLevelWidget()
sortwin.parent().ui.plotButton.click()
else:
QtGui.QScrollArea.keyPressEvent(self, event) # pass it on
class SortWindow(SpykeToolWindow):
"""Sort window"""
def __init__(self, parent, pos=None):
SpykeToolWindow.__init__(self, parent, flags=QtCore.Qt.Tool)
self.spykewindow = parent
ncols = self.sort.probe.ncols
nrows = self.sort.probe.nrows
# try and allow the same amount of horizontal space per column for 2 and 3 col probes:
if ncols <= 2:
self.MAINSPLITTERPOS = 300
else:
self.MAINSPLITTERPOS = 265 # move it more to the left
# make horizontal sort slider use as little vertical space as possible
self.VSPLITTERPOS = 1
panelwidth = PANELWIDTHPERCOLUMN * ncols
panelheight = PANELHEIGHTPERROW * nrows
width = max(self.MAINSPLITTERPOS + panelwidth + VSCROLLBARWIDTH, MINSORTWINDOWWIDTH)
size = (width, SORTWINDOWHEIGHT)
self.setWindowTitle('Sort Window')
self.move(*pos)
self.resize(*size)
self._source = None # source cluster for comparison
self.slider = SpikeSelectionSlider(Qt.Horizontal, self)
self.slider.setInvertedControls(True)
self.slider.setToolTip('Position of sliding spike selection time window')
self.connect(self.slider, QtCore.SIGNAL('valueChanged(int)'),
self.on_slider_valueChanged)
self.connect(self.slider, QtCore.SIGNAL('sliderPressed()'),
self.on_slider_sliderPressed)
self.nlist = NList(self)
self.nlist.setToolTip('Neuron list')
self.nslist = NSList(self)
self.nslist.setToolTip('Sorted spike list')
self.uslist = USList(self) # should really be multicolumn tableview
self.uslist.setToolTip('Unsorted spike list')
tw = self.spykewindow.sort.tw
self.panel = SpikeSortPanel(self, tw=tw)
self.panel.setMinimumSize(QtCore.QSize(panelwidth, panelheight))
self.panelscrollarea = PanelScrollArea(self)
self.panelscrollarea.setWidget(self.panel)
self.panelscrollarea.setMinimumWidth(panelwidth + VSCROLLBARWIDTH)
self.panelscrollarea.setWidgetResizable(True) # allows panel to size bigger than min
self.vsplitter = QtGui.QSplitter(Qt.Vertical)
self.vsplitter.addWidget(self.slider)
self.vsplitter.addWidget(self.nlist)
self.vsplitter.addWidget(self.nslist)
self.vsplitter.addWidget(self.uslist)
self.mainsplitter = QtGui.QSplitter(Qt.Horizontal)
self.mainsplitter.addWidget(self.vsplitter)
self.mainsplitter.addWidget(self.panelscrollarea)
self.layout = QtGui.QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.addWidget(self.mainsplitter)
mainwidget = QtGui.QWidget(self)
mainwidget.setLayout(self.layout)
self.setCentralWidget(mainwidget)
self.toolbar = self.setupToolbar()
self.addToolBar(self.toolbar)
def setupToolbar(self):
toolbar = QtGui.QToolBar(self)
toolbar.setObjectName('toolbar')
toolbar.setFloatable(True)
toolbar.setIconSize(QtCore.QSize(16, 16)) # like in main spyke window
actionDelete = QAction(QIcon('res/edit-delete.svg'), 'Del', self)
tt = ('<nobr><b>Del</b> Delete selected spikes or clusters</nobr>\n'
'<nobr><b>CTRL+Del</b> Delete selected spikes</nobr>')
actionDelete.setToolTip(tt)
self.connect(actionDelete, QtCore.SIGNAL('triggered()'),
self.on_actionDelete_triggered)
toolbar.addAction(actionDelete)
actionMergeClusters = QAction('M', self)
tt = '<nobr><b>M</b> Merge clusters</nobr>'
actionMergeClusters.setToolTip(tt)
self.connect(actionMergeClusters, QtCore.SIGNAL('triggered()'),
self.on_actionMergeClusters_triggered)
toolbar.addAction(actionMergeClusters)
#actionToggleClustersGood = QAction(QIcon('res/dialog-apply.svg'), 'G', self)
actionToggleClustersGood = QAction('G', self)
tt = '<nobr><b>G</b> Toggle clusters as "good"</nobr>'
actionToggleClustersGood.setToolTip(tt)
self.connect(actionToggleClustersGood, QtCore.SIGNAL('triggered()'),
self.on_actionToggleClustersGood_triggered)
toolbar.addAction(actionToggleClustersGood)
actionSplit = QAction('+', self)
tt = '<nobr><b>+</b> Split off selected spikes</nobr>'
actionSplit.setToolTip(tt)
self.connect(actionSplit, QtCore.SIGNAL('triggered()'),
self.on_actionSplit_triggered)
toolbar.addAction(actionSplit)
actionLabelMultiunit = QAction('-', self)
tt = '<nobr><b>-</b> Label clusters as multiunit</nobr>'
actionLabelMultiunit.setToolTip(tt)
self.connect(actionLabelMultiunit, QtCore.SIGNAL('triggered()'),
self.on_actionLabelMultiunit_triggered)
toolbar.addAction(actionLabelMultiunit)
actionChanSplitClusters = QAction('/', self)
tt = '<nobr><b>/</b> Split clusters by channels</nobr>'
actionChanSplitClusters.setToolTip(tt)
self.connect(actionChanSplitClusters, QtCore.SIGNAL('triggered()'),
self.on_actionChanSplitClusters_triggered)
toolbar.addAction(actionChanSplitClusters)
actionDensitySplit = QAction('P', self)
tt = ('<nobr><b>P</b> Split cluster pair by density along line between '
'their centers</nobr>')
actionDensitySplit.setToolTip(tt)
self.connect(actionDensitySplit, QtCore.SIGNAL('triggered()'),
self.on_actionDensitySplit_triggered)
toolbar.addAction(actionDensitySplit)
actionRandomSplit = QAction('\\', self)
tt = ('<nobr><b>\\</b> Randomly split each selected cluster in half</nobr>')
actionRandomSplit.setToolTip(tt)
self.connect(actionRandomSplit, QtCore.SIGNAL('triggered()'),
self.on_actionRandomSplit_triggered)
toolbar.addAction(actionRandomSplit)
#actionRenumber = QAction(QIcon('res/gtk-edit.svg'), '#', self)
actionRenumber = QAction('#', self)
tt = ('<nobr><b>#</b> Renumber all clusters in vertical spatial order</nobr>\n'
'<nobr><b>CTRL+#</b> Renumber selected cluster</nobr>')
actionRenumber.setToolTip(tt)
self.connect(actionRenumber, QtCore.SIGNAL('triggered()'),
self.on_actionRenumber_triggered)
toolbar.addAction(actionRenumber)
actionFind = QAction(QIcon('res/edit-find.svg'), 'Find', self)
tt = ('<nobr><b>CTRL+F</b> Find spike in cluster plot</nobr>')
actionFind.setToolTip(tt)
self.connect(actionFind, QtCore.SIGNAL('triggered()'),
self.on_actionFind_triggered)
toolbar.addAction(actionFind)
actionSelectRandomSpikes = QAction('R', self)
tt = '<nobr><b>R</b> Select random sample of spikes of current clusters</nobr>'
actionSelectRandomSpikes.setToolTip(tt)
self.connect(actionSelectRandomSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionSelectRandomSpikes_triggered)
toolbar.addAction(actionSelectRandomSpikes)
actionToggleErrors = QAction('E', self)
actionToggleErrors.setCheckable(True)
actionToggleErrors.setChecked(self.panel.enable_fills)
tt = '<nobr><b>CTRL+E</b> Toggle visibility of template error limits</nobr>'
actionToggleErrors.setToolTip(tt)
self.connect(actionToggleErrors, QtCore.SIGNAL('toggled(bool)'),
self.on_actionToggleErrors_toggled)
toolbar.addAction(actionToggleErrors)
self.actionToggleErrors = actionToggleErrors
nsamplesComboBox = QtGui.QComboBox(self)
nsamplesComboBox.setToolTip('Number of spikes per cluster to randomly select')
nsamplesComboBox.setFocusPolicy(Qt.NoFocus)
nsamplesComboBox.addItems(['100', '50', '20', '10', '5', '1'])
nsamplesComboBox.setCurrentIndex(2)
toolbar.addWidget(nsamplesComboBox)
self.connect(nsamplesComboBox, QtCore.SIGNAL('activated(int)'),
self.on_actionSelectRandomSpikes_triggered)
self.nsamplesComboBox = nsamplesComboBox
gainComboBox = QtGui.QComboBox(self)
gainComboBox.setToolTip('Waveform gain (default: 1.5)')
gainComboBox.setFocusPolicy(Qt.NoFocus)
gainComboBox.addItems(['4', '3.75', '3.5', '3.25', '3', '2.75', '2.5', '2.25', '2',
'1.75', '1.5', '1.25', '1', '0.75', '0.5', '0.25'])
gainComboBox.setCurrentIndex(3)
toolbar.addWidget(gainComboBox)
self.connect(gainComboBox, QtCore.SIGNAL('activated(int)'),
self.on_gainComboBox_triggered)
self.gainComboBox = gainComboBox
#actionAlignMin = QAction(QIcon('res/go-bottom.svg'), 'Min', self)
actionAlignMin = QAction('Min', self)
actionAlignMin.setToolTip('Align selected spikes to min')
self.connect(actionAlignMin, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMin_triggered)
toolbar.addAction(actionAlignMin)
#actionAlignMax = QAction(QIcon('res/go-top.svg'), 'Max', self)
actionAlignMax = QAction('Max', self)
actionAlignMax.setToolTip('Align selected spikes to max')
self.connect(actionAlignMax, QtCore.SIGNAL('triggered()'),
self.on_actionAlignMax_triggered)
toolbar.addAction(actionAlignMax)
#actionAlignBest = QAction(QIcon('res/emblem-OK.png'), 'Best', self)
actionAlignBest = QAction('B', self)
tt = '<nobr><b>B</b> Align selected spikes by best fit</nobr>'
actionAlignBest.setToolTip(tt)
self.connect(actionAlignBest, QtCore.SIGNAL('triggered()'),
self.on_actionAlignBest_triggered)
toolbar.addAction(actionAlignBest)
actionShiftLeft = QAction('[', self)
tt = ('<nobr><b>[</b> Shift selected spikes 2 points left</nobr>\n'
'<nobr><b>CTRL+[</b> Shift selected spikes 1 point left</nobr>')
actionShiftLeft.setToolTip(tt)
self.connect(actionShiftLeft, QtCore.SIGNAL('triggered()'),
self.on_actionShiftLeft_triggered)
toolbar.addAction(actionShiftLeft)
actionShiftRight = QAction(']', self)
tt = ('<nobr><b>]</b> Shift selected spikes 2 points right</nobr>\n'
'<nobr><b>CTRL+]</b> Shift selected spikes 1 point right</nobr>')
actionShiftRight.setToolTip(tt)
self.connect(actionShiftRight, QtCore.SIGNAL('triggered()'),
self.on_actionShiftRight_triggered)
toolbar.addAction(actionShiftRight)
incltComboBox = QtGui.QComboBox(self)
incltComboBox.setToolTip("Waveform duration (us) to include for component "
"analysis,\nasymmetric around spike time")
incltComboBox.setFocusPolicy(Qt.NoFocus)
dtw = self.sort.tw[1] - self.sort.tw[0] # spike time window width
incltstep = intround(dtw / 10) # evenly spaced inclt values
incltvals = np.arange(dtw, 0, -incltstep)
incltComboBox.addItems([ str(incltval) for incltval in incltvals ])
incltComboBox.setCurrentIndex(0)
toolbar.addWidget(incltComboBox)
self.connect(incltComboBox, QtCore.SIGNAL('activated(int)'),
self.on_incltComboBox_triggered)
self.incltComboBox = incltComboBox
#incltunitsLabel = QtGui.QLabel('us', self)
#toolbar.addWidget(incltunitsLabel)
nPCsPerChanSpinBox = QtGui.QSpinBox(self)
nPCsPerChanSpinBox.setToolTip("Number of PCs to use per channel to feed into ICA")
nPCsPerChanSpinBox.setFocusPolicy(Qt.NoFocus)
toolbar.addWidget(nPCsPerChanSpinBox)
nPCsPerChanSpinBox.setMinimum(1)
self.connect(nPCsPerChanSpinBox, QtCore.SIGNAL('valueChanged(int)'),
self.on_nPCsPerChanSpinBox_valueChanged)
nPCsPerChanSpinBox.setValue(self.sort.npcsperchan)
self.nPCsPerChanSpinBox = nPCsPerChanSpinBox
#actionFindPrevMostSimilar = QAction(QIcon('res/go-previous.svg'), '<', self)
actionFindPrevMostSimilar = QAction('<', self)
tt = '<nobr><b><</b> Find previous most similar cluster</nobr>'
actionFindPrevMostSimilar.setToolTip(tt)
self.connect(actionFindPrevMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindPrevMostSimilar_triggered)
toolbar.addAction(actionFindPrevMostSimilar)
#actionFindNextMostSimilar = QAction(QIcon('res/go-next.svg'), '>', self)
actionFindNextMostSimilar = QAction('>', self)
tt = '<nobr><b>></b> Find next most similar cluster</nobr>'
actionFindNextMostSimilar.setToolTip(tt)
self.connect(actionFindNextMostSimilar, QtCore.SIGNAL('triggered()'),
self.on_actionFindNextMostSimilar_triggered)
toolbar.addAction(actionFindNextMostSimilar)
actionReloadSpikes = QAction(QIcon('res/view-refresh.svg'), 'Reload', self)
tt = ('<nobr><b>F5</b> Reload waveforms of selected spikes. '
'If none selected, reload all</nobr>\n'
'<nobr><b>CTRL+F5</b> Use mean waveform to choose chans to reload</nobr>')
actionReloadSpikes.setToolTip(tt)
self.connect(actionReloadSpikes, QtCore.SIGNAL('triggered()'),
self.on_actionReloadSpikes_triggered)
toolbar.addAction(actionReloadSpikes)
actionSave = QAction(QIcon('res/document-save.svg'), '&Save', self)
actionSave.setToolTip('Save sort panel to file')
self.connect(actionSave, QtCore.SIGNAL('triggered()'),
self.on_actionSave_triggered)
toolbar.addAction(actionSave)
return toolbar
def get_sort(self):
return self.spykewindow.sort
sort = property(get_sort) # make this a property for proper behaviour after unpickling
def closeEvent(self, event):
self.spykewindow.HideWindow('Sort')
def mousePressEvent(self, event):
"""These are mostly passed on up from spyke list views and sort panel. Left
clicks are (or should be) filtered out"""
buttons = event.buttons()
if buttons == QtCore.Qt.MiddleButton:
#self.on_actionSelectRandomSpikes_triggered()
self.spykewindow.ui.plotButton.click() # same as hitting ENTER in nslist
elif buttons == QtCore.Qt.RightButton:
self.clear()
def keyPressEvent(self, event):
"""Alpha character keypresses are by default caught by the child lists for quickly
scrolling down to and selecting list items. However, the appropriate alpha
keypresses have been set in the child lists to be ignored, so they propagate
up to here"""
key = event.key()
modifiers = event.modifiers()
ctrl = modifiers & Qt.ControlModifier # ctrl is down
spw = self.spykewindow
if key == Qt.Key_A: # ignored in SpykeListViews
spw.ui.plotButton.click() # same as hitting ENTER in nslist
elif key == Qt.Key_X: # ignored in SpykeListViews
spw.ui.plotXcorrsButton.click()
elif key == Qt.Key_N: # ignored in SpykeListViews
spw.ui.normButton.click()
elif key == Qt.Key_Escape: # deselect all spikes and all clusters
self.clear()
elif key == Qt.Key_Delete:
self.on_actionDelete_triggered()
elif key == Qt.Key_M: # ignored in SpykeListViews
self.on_actionMergeClusters_triggered()
elif key == Qt.Key_G: # ignored in SpykeListViews
self.on_actionToggleClustersGood_triggered()
elif key == Qt.Key_Equal: # ignored in SpykeListViews
self.on_actionSplit_triggered()
elif key == Qt.Key_Minus: # ignored in SpykeListViews
self.on_actionLabelMultiunit_triggered()
elif key == Qt.Key_Slash: # ignored in SpykeListViews
self.on_actionChanSplitClusters_triggered()
elif key == Qt.Key_P: # ignored in SpykeListViews
self.on_actionDensitySplit_triggered()
elif key == Qt.Key_Backslash: # ignored in SpykeListViews
self.on_actionRandomSplit_triggered()
elif key == Qt.Key_NumberSign: # ignored in SpykeListViews
self.on_actionRenumber_triggered()
elif key == Qt.Key_F: # ignored in SpykeListViews
if ctrl:
self.FindSpike()
else:
self.FindCluster()
elif key == Qt.Key_R: # ignored in SpykeListViews
self.on_actionSelectRandomSpikes_triggered()
elif key == Qt.Key_Space: # ignored in SpykeListViews
if ctrl:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
else:
spw.on_clusterButton_clicked()
elif key == Qt.Key_B: # ignored in SpykeListViews
self.on_actionAlignBest_triggered()
elif key == Qt.Key_BracketLeft: # ignored in SpykeListViews
self.on_actionShiftLeft_triggered()
elif key == Qt.Key_BracketRight: # ignored in SpykeListViews
self.on_actionShiftRight_triggered()
elif key == Qt.Key_Comma: # ignored in SpykeListViews
self.on_actionFindPrevMostSimilar_triggered()
elif key == Qt.Key_Period: # ignored in SpykeListViews
self.on_actionFindNextMostSimilar_triggered()
elif key == Qt.Key_F5: # ignored in SpykeListViews
self.on_actionReloadSpikes_triggered()
elif key == Qt.Key_E: # ignored in SpykeListViews
if ctrl:
self.actionToggleErrors.toggle()
else:
self.clear() # E is synonymous with ESC
elif key == Qt.Key_C: # toggle between PCA and ICA, ignored in SpykeListViews
c = str(spw.ui.componentAnalysisComboBox.currentText())
if c == 'PCA':
index = spw.ui.componentAnalysisComboBox.findText('ICA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
elif c == 'ICA':
index = spw.ui.componentAnalysisComboBox.findText('PCA')
spw.ui.componentAnalysisComboBox.setCurrentIndex(index)
spw.on_plotButton_clicked()
elif key == Qt.Key_T: # toggle plotting against time, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 't':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.on_c0c1tButton_clicked() # plot against time
elif key == Qt.Key_W: # toggle plotting against RMSError, ignored in SpykeListViews
z = str(spw.ui.zDimComboBox.currentText())
if z == 'RMSerror':
spw.on_c0c1c2Button_clicked() # plot in pure component analysis space
else:
spw.ui.zDimComboBox.setCurrentIndex(3)
spw.on_plotButton_clicked() # plot against RMSError
elif key in [Qt.Key_Enter, Qt.Key_Return]:
# this is handled at a lower level by on_actionItem_triggered
# in the various listview controls
pass
else:
SpykeToolWindow.keyPressEvent(self, event) # pass it on
def clear(self):
"""Clear selections in this order: unsorted spikes, sorted spikes,
cluster automatically selected for comparison, cluster 0, clusters"""
spw = self.spykewindow
clusters = spw.GetClusters()
if len(self.uslist.selectedIndexes()) > 0:
self.uslist.clearSelection()
elif self.nslist.nrowsSelected > 0:
self.nslist.clearSelection()
elif len(clusters) == 2 and self._source in clusters:
clusters.remove(self._source)
spw.SelectClusters(clusters, on=False)
elif 0 in spw.GetClusterIDs():
for cluster in spw.GetClusters():
if cluster.id == 0:
spw.SelectClusters([cluster], on=False)
break
else:
self.nlist.clearSelection()
# reset colours in cluster plot:
gw = spw.windows['Cluster'].glWidget
gw.colour()
gw.updateGL()
def on_actionDelete_triggered(self):
"""Delete explicity selected spikes, or clusters"""
selsids = self.spykewindow.GetSpikes() # IDs of explicitly selected spikes
nselsids = len(selsids)
if (QApplication.instance().keyboardModifiers() & Qt.ControlModifier
or nselsids > 0):
self.delete_spikes()
else:
self.delete_clusters()
def delete_clusters(self):
"""Del button press/click"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'delete clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# deselect and delete clusters
spw.DelClusters(clusters)
if len(s.clusters) > 0:
# select cluster that replaces the first of the deleted clusters in norder
selrows = [ cc.oldnorder.index(oldunid) for oldunid in cc.oldunids ]
if len(selrows) > 0:
selrow = selrows[0]
nlist = spw.windows['Sort'].nlist
nlist.selectRows(selrow) # TODO: this sets selection, but not focus
#else: # first of deleted clusters was last in norder, don't select anything
# save more undo/redo stuff
newclusters = []
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def delete_spikes(self):
"""CTRL+Del button press/click"""
self.spykewindow.SplitSpikes(delete=True)
def on_actionSplit_triggered(self):
"""+ button click. Split off selected clusters into their own cluster"""
self.spykewindow.SplitSpikes(delete=False)
def on_actionMergeClusters_triggered(self):
"""Merge button (M) click. Merge selected clusters. Easier to use than
running gac() on selected clusters using a really big sigma to force
them to all merge"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
sids = [] # spikes to merge
for cluster in clusters:
sids.append(cluster.neuron.sids)
# merge any selected usids as well
sids.append(spw.GetUnsortedSpikes())
sids = np.concatenate(sids)
if len(sids) == 0:
return
# save some undo/redo stuff
message = 'merge clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# decide on newnid and where to insert it into norder
newnid = None # merge by default into a new highest numbered nid
inserti = None # order new cluster by default to end of nlist
if len(clusters) == 1:
# keep same position of this one nid in norder, regardless of whether it's
# single-unit, multiunit, or junk
inserti = s.norder.index(clusters[0].id)
elif len(clusters) > 1:
oldunids = np.asarray(cc.oldunids)
suids = oldunids[oldunids > 0] # selected single unit nids
if len(suids) > 0: # merge into largest selected single unit nid:
spikecounts = np.asarray([ s.neurons[suid].nspikes for suid in suids ])
newnid = suids[spikecounts.argmax()]
inserti = s.norder.index(newnid)
# correct for shift due to deletion of oldunids that precede newnid in norder:
inserti -= sum([ s.norder.index(oldunid) < inserti for oldunid in oldunids])
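                # e.g. with norder=[3, 5, 7, 9] and nids 5 and 9 merged into newnid 9: inserti starts at 3;
                # one deleted nid (5) precedes it in norder, so inserti becomes 2, which keeps the merged
                # cluster positioned right after nid 7 once 5 and 9 have been removed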
# delete selected clusters and deselect selected usids
spw.DelClusters(clusters, update=False)
self.uslist.clearSelection()
# create new cluster
#t0 = time.time()
newcluster = spw.CreateCluster(update=False, id=newnid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
plotdims = spw.GetClusterPlotDims()
newcluster.update_pos()
# save more undo/redo stuff
cc.save_new([newcluster], s.norder, s.good)
spw.AddClusterChangeToStack(cc)
# now do some final updates
spw.UpdateClustersGUI()
spw.ColourPoints(newcluster)
#print('applying clusters to plot took %.3f sec' % (time.time()-t0))
# select newly created cluster
spw.SelectClusters(newcluster)
cc.message += ' into cluster %d' % newcluster.id
print(cc.message)
def on_actionToggleClustersGood_triggered(self):
"""'Good' button (G) click. Toggle 'good' flag of all selected clusters"""
spw = self.spykewindow
clusters = spw.GetClusters()
cids = []
for cluster in clusters:
cluster.neuron.good = not cluster.neuron.good
cids.append(cluster.id)
self.nlist.updateAll() # nlist item colouring will change as a result
print("Toggled 'good' flag of clusters %r" % cids)
def on_actionLabelMultiunit_triggered(self):
"""- button click. Label all selected clusters as multiunit by deleting them
and creating new ones with -ve IDs"""
spw = self.spykewindow
clusters = spw.GetClusters()
s = self.sort
spikes = s.spikes
# only relabel single unit clusters:
clusters = [ cluster for cluster in clusters if cluster.id > 0 ]
if len(clusters) == 0:
return
sids = []
for cluster in clusters:
sids.append(cluster.neuron.sids)
sids = np.concatenate(sids)
# save some undo/redo stuff
message = 'label as multiunit clusters %r' % [ c.id for c in clusters ]
cc = ClusterChange(sids, spikes, message)
cc.save_old(clusters, s.norder, s.good)
# delete old clusters
inserti = s.norder.index(clusters[0].id)
# collect cluster sids before cluster deletion
sidss = [ cluster.neuron.sids for cluster in clusters ]
spw.DelClusters(clusters, update=False)
# create new multiunit clusters
newclusters = []
for sids in sidss:
muid = s.get_nextmuid()
newcluster = spw.CreateCluster(update=False, id=muid, inserti=inserti)
neuron = newcluster.neuron
self.MoveSpikes2Neuron(sids, neuron, update=False)
newcluster.update_pos()
newclusters.append(newcluster)
inserti += 1
# select newly labelled multiunit clusters
spw.SelectClusters(newclusters)
# save more undo/redo stuff
cc.save_new(newclusters, s.norder, s.good)
spw.AddClusterChangeToStack(cc)
print(cc.message)
def on_actionChanSplitClusters_triggered(self):
"""Split by channels button (/) click"""
## TODO: make sure this works on .srf files! Why was chancombosplit being used?
self.spykewindow.maxchansplit()
#self.spykewindow.chancombosplit()
def on_actionDensitySplit_triggered(self):
"""Split cluster pair by density along line between their centers"""
self.spykewindow.densitysplit()
def on_actionRandomSplit_triggered(self):
"""Randomly split each selected cluster in half"""
self.spykewindow.randomsplit()
def on_actionRenumber_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
self.renumber_selected_cluster()
else:
self.renumber_all_clusters()
def renumber_selected_cluster(self):
"""Renumber a single selected cluster to whatever free ID the user wants, for
colouring purposes"""
spw = self.spykewindow
s = self.sort
spikes = s.spikes
cluster = spw.GetCluster() # exactly one selected cluster
oldid = cluster.id
newid = max(s.norder) + 1
newid, ok = QtGui.QInputDialog.getInt(self, "Renumber cluster",
"This will clear the undo/redo stack, and is not undoable.\n"
"Enter new ID:", value=newid)
if not ok:
return
if newid in s.norder:
print("Choose a non-existing nid to renumber to")
return
# deselect cluster
spw.SelectClusters(cluster, on=False)
# rename to newid
cluster.id = newid # this indirectly updates neuron.id
# update cluster and neuron dicts, and spikes array
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
# remove duplicate oldid dict entries
del s.clusters[oldid]
del s.neurons[oldid]
# replace oldid with newid in norder
s.norder[s.norder.index(oldid)] = newid
# update colour of any relevant points in cluster plot
spw.ColourPoints(cluster)
# reselect cluster
spw.SelectClusters(cluster)
# some cluster changes in stack may no longer be applicable, reset cchanges
del spw.cchanges[:]
spw.cci = -1
print('Renumbered neuron %d to %d' % (oldid, newid))
def renumber_all_clusters(self):
"""Renumber single unit clusters consecutively from 1, ordered by y position. Do the
same for multiunit (-ve number) clusters, starting from -1. Sorting by y position
makes user inspection of clusters more orderly, makes the presence of duplicate
clusters more obvious, and allows for maximal spatial separation between clusters of
the same colour, reducing colour conflicts"""
val = QtGui.QMessageBox.question(self.panel, "Renumber all clusters",
"Are you sure? This will clear the undo/redo stack, and is not undoable.",
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if val == QtGui.QMessageBox.No:
return
spw = self.spykewindow
s = self.sort
spikes = s.spikes
# get spatially and numerically ordered lists of new ids
oldids = np.asarray(s.norder)
oldsuids = oldids[oldids > 0]
oldmuids = oldids[oldids < 0]
# this is a bit confusing: find indices that would sort old ids by y pos, but then
# what you really want is to find the y pos *rank* of each old id, so you need to
# take argsort again:
newsuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldsuids ]).argsort().argsort() + 1
newmuids = np.asarray([ s.clusters[cid].pos['y0']
for cid in oldmuids ]).argsort().argsort() + 1
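        # e.g. y0 positions [52.3, 10.1, 33.7] -> argsort() = [1, 2, 0] -> argsort().argsort() = [2, 0, 1],
        # and adding 1 gives the y-ordered ids [3, 1, 2]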
newmuids = -newmuids
# multiunit, followed by single unit, no 0 junk cluster. Can't seem to do it the other
# way around as of Qt 4.7.2 - it seems QListViews don't like having a -ve value in
# the last entry. Doing so causes all 2 digit values in the list to become blank,
# suggests a spacing calculation bug. Reproduce by making last entry multiunit,
# undoing then redoing. Actually, maybe the bug is it doesn't like having a number
# in the last entry with fewer digits than the preceding entry. Only seems to be a
# problem when setting self.setUniformItemSizes(True).
newids = np.concatenate([newmuids, newsuids])
# test
if np.all(oldids == newids):
print('Nothing to renumber: cluster IDs already ordered in y0 and contiguous')
return
# update for replacing oldids with newids
oldids = np.concatenate([oldmuids, oldsuids])
# deselect current selections
selclusters = spw.GetClusters()
oldselids = [ cluster.id for cluster in selclusters ]
spw.SelectClusters(selclusters, on=False)
# delete junk cluster, if it exists
if 0 in s.clusters:
s.remove_neuron(0)
print('Deleted junk cluster 0')
if 0 in oldselids:
oldselids.remove(0)
# replace old ids with new ids
cw = spw.windows['Cluster']
oldclusters = s.clusters.copy() # no need to deepcopy, just copy refs, not clusters
dims = spw.GetClusterPlotDims()
for oldid, newid in zip(oldids, newids):
newid = int(newid) # keep as Python int, not numpy int
if oldid == newid:
continue # no need to waste time removing and recreating this cluster
            # change all occurrences of oldid to newid
cluster = oldclusters[oldid]
cluster.id = newid # this indirectly updates neuron.id
# update cluster and neuron dicts
s.clusters[newid] = cluster
s.neurons[newid] = cluster.neuron
sids = cluster.neuron.sids
spikes['nid'][sids] = newid
# remove any orphaned cluster ids
for oldid in oldids:
if oldid not in newids:
del s.clusters[oldid]
del s.neurons[oldid]
# reset norder
s.norder = []
s.norder.extend(sorted([ int(newid) for newid in newmuids ])[::-1])
s.norder.extend(sorted([ int(newid) for newid in newsuids ]))
# now do some final updates
spw.UpdateClustersGUI()
spw.ColourPoints(s.clusters.values())
# reselect the previously selected (but now renumbered) clusters,
# helps user keep track
oldiis = [ list(oldids).index(oldselid) for oldselid in oldselids ]
newselids = newids[oldiis]
spw.SelectClusters([s.clusters[cid] for cid in newselids])
# all cluster changes in stack are no longer applicable, reset cchanges
del spw.cchanges[:]
spw.cci = -1
print('Renumbering complete')
def on_actionFind_triggered(self):
"""Find current cluster or spike"""
ctrl = QApplication.instance().keyboardModifiers() & Qt.ControlModifier
if ctrl:
self.FindSpike()
else:
self.FindCluster()
def FindCluster(self):
"""Move focus to location of currently selected (single) cluster"""
spw = self.spykewindow
try:
cluster = spw.GetCluster()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
dims = spw.GetClusterPlotDims()
gw.focus = np.float32([ cluster.normpos[dim] for dim in dims ])
gw.panTo() # pan to new focus
gw.updateGL()
def FindSpike(self):
"""Move focus to location of currently selected (single) spike"""
spw = self.spykewindow
try:
sid = spw.GetSpike()
except RuntimeError as err:
print(err)
return
gw = spw.windows['Cluster'].glWidget
pointis = gw.sids.searchsorted(sid)
gw.focus = gw.points[pointis]
gw.panTo() # pan to new focus
gw.updateGL()
def on_actionSelectRandomSpikes_triggered(self):
"""Select random sample of spikes in current cluster(s), or random sample
        of unsorted spikes if no cluster(s) selected"""
nsamples = int(self.nsamplesComboBox.currentText())
if len(self.nslist.neurons) > 0:
slist = self.nslist
else:
slist = self.uslist
slist.clearSelection() # emits selectionChanged signal, .reset() doesn't
slist.selectRandom(nsamples)
def on_gainComboBox_triggered(self):
"""Set gain of panel based on gainComboBox selection"""
panel = self.panel
panel.gain = float(self.gainComboBox.currentText())
panel.do_layout() # resets axes lims and recalcs panel.pos
panel._update_scale()
panel.draw_refs()
panel.updateAllItems()
def on_actionAlignMin_triggered(self):
self.Align('min')
def on_actionAlignMax_triggered(self):
self.Align('max')
def on_actionAlignBest_triggered(self):
self.Align('best')
def on_actionShiftLeft_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = -1
else:
nt = -2
self.Shift(nt)
def on_actionShiftRight_triggered(self):
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
nt = 1
else:
nt = 2
self.Shift(nt)
def on_incltComboBox_triggered(self):
"""Change length of chan selection lines, optionally trigger cluster replot"""
self.panel.update_selvrefs()
self.panel.draw_refs()
#self.spykewindow.ui.plotButton.click()
def get_inclt(self):
"""Return inclt value in incltComboBox"""
return float(self.incltComboBox.currentText()) # us
inclt = property(get_inclt)
def get_tis(self):
"""Return tis (start and end timepoint indices) of duration inclt, asymmetric around
t=0 spike time. Note that any changes to the code here should also be made in the
timepoint selection display code in SortPanel.update_selvrefs()"""
s = self.sort
inclt = self.inclt # duration to include, asymmetric around t=0 spike time (us)
tw = self.panel.tw
dtw = tw[1] - tw[0] # spike time window width
left = intround(abs(tw[0]) / dtw * inclt) # left fraction wrt t=0 spike time
right = inclt - left # right fraction wrt t=0 spike time
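        # e.g. with tw=(-500, 500) us and inclt=600 us: dtw=1000, left=300, right=300;
        # with an asymmetric tw=(-400, 600) us: left=240, right=360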
tis = s.twts.searchsorted([-left, right])
return tis
tis = property(get_tis)
def on_nPCsPerChanSpinBox_valueChanged(self, val):
self.sort.npcsperchan = val
def on_actionReloadSpikes_triggered(self):
spw = self.spykewindow
sids = spw.GetAllSpikes()
sort = self.sort
if len(sids) == 0:
# if no spikes specified, reload all spikes
sids = sort.spikes['id']
usemeanchans = False
if QApplication.instance().keyboardModifiers() & Qt.ControlModifier:
usemeanchans = True
sort.reload_spikes_and_templates(sids, usemeanchans=usemeanchans)
# add sids to the set of dirtysids to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots:
self.panel.updateAllItems()
def on_actionFindPrevMostSimilar_triggered(self):
self.findMostSimilarCluster('previous')
def on_actionFindNextMostSimilar_triggered(self):
self.findMostSimilarCluster('next')
def on_actionToggleErrors_toggled(self, checked):
self.panel.showFills(checked)
def on_slider_valueChanged(self, slideri):
self.nslist.clearSelection() # emits selectionChanged signal, .reset() doesn't
if self.nslist.model().sliding == False:
self.nslist.model().sids.sort() # change from nid order to sid order
self.nslist.updateAll() # update to reflect new ordering
self.nslist.model().sliding = True
nsamples = int(self.nsamplesComboBox.currentText())
rows = np.arange(slideri, slideri+nsamples)
self.nslist.selectRows(rows)
def on_slider_sliderPressed(self):
"""Make slider click (without movement) highlight the first nsamples
or fewer spikes when slider is at 0 position"""
slideri = self.slider.value()
if slideri == 0:
nsamples = int(self.nsamplesComboBox.currentText())
nsamples = min(nsamples, self.nslist.model().nspikes)
rows = np.arange(nsamples)
self.nslist.selectRows(rows)
def update_slider(self):
"""Update slider limits and step sizes"""
nsamples = int(self.nsamplesComboBox.currentText())
nsids = len(self.nslist.sids)
ulim = max(nsids-nsamples, 1) # upper limit
self.slider.setRange(0, ulim)
self.slider.setSingleStep(1)
self.slider.setPageStep(nsamples)
def findMostSimilarCluster(self, which='next'):
"""If no chans selected, compare source to next or previous most similar cluster
        based on chans the two have in common, while requiring the two have each other's
max chans in common. If chans have been selected, use them as a starting set of
chans to compare on. Also, use only the timepoint range selected in incltComboBox"""
try:
source = self.getClusterComparisonSource()
except RuntimeError as err:
print(err)
return
destinations = list(self.sort.clusters.values())
destinations.remove(source)
selchans = np.sort(self.panel.chans_selected)
if len(selchans) > 0:
srcchans = np.intersect1d(source.neuron.wave.chans, selchans)
if len(srcchans) == 0:
print("Source cluster doesn't overlap with selected chans")
return
else:
srcchans = source.neuron.wave.chans
if self.spykewindow.ui.normButton.isChecked():
print("NOTE: findMostSimilarCluster() doesn't currently take spike amplitude "
"normalization into account. To see the true amplitudes used to compare "
"neuron pairs, turn off normalization")
errors = []
dests = []
t0i, t1i = self.tis # timepoint range selected in incltComboBox
# try and compare source neuron waveform to all destination neuron waveforms
for dest in destinations:
if dest.neuron.wave.data is None: # hasn't been calculated yet
dest.neuron.update_wave()
dstchans = dest.neuron.wave.chans
if len(selchans) > 0:
if not set(selchans).issubset(dstchans):
continue
dstchans = selchans
cmpchans = np.intersect1d(srcchans, dstchans)
if len(cmpchans) == 0: # not comparable
continue
# ensure maxchan of both source and dest neuron are both in cmpchans
if source.neuron.chan not in cmpchans or dest.neuron.chan not in cmpchans:
continue
srcwavedata = source.neuron.wave[cmpchans].data[:, t0i:t1i]
dstwavedata = dest.neuron.wave[cmpchans].data[:, t0i:t1i]
error = core.rms(srcwavedata - dstwavedata)
errors.append(error)
dests.append(dest)
if len(errors) == 0:
print("No sufficiently overlapping clusters on selected chans to compare to")
return
errors = np.asarray(errors)
dests = np.asarray(dests)
desterrsortis = errors.argsort()
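        # dests sorted by ascending RMS error: _cmpid walks through this ranking, so 'next' steps to
        # progressively less similar clusters and 'previous' steps back towards the most similar one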
if which == 'next':
self._cmpid += 1
elif which == 'previous':
self._cmpid -= 1
else: raise ValueError('Unknown which: %r' % which)
self._cmpid = max(self._cmpid, 0)
self._cmpid = min(self._cmpid, len(dests)-1)
dest = dests[desterrsortis][self._cmpid]
self.spykewindow.SelectClusters(dest)
desterr = errors[desterrsortis][self._cmpid]
print('n%d to n%d rmserror: %.2f uV' %
(source.id, dest.id, self.sort.converter.AD2uV(desterr)))
def getClusterComparisonSource(self):
selclusters = self.spykewindow.GetClusters()
errmsg = 'unclear which cluster to use as source for comparison'
if len(selclusters) == 1:
source = selclusters[0]
self._source = source
self._cmpid = -1 # init/reset
elif len(selclusters) == 2:
source = self._source
if source not in selclusters:
raise RuntimeError(errmsg)
# deselect old destination cluster:
selclusters.remove(source)
self.spykewindow.SelectClusters(selclusters, on=False)
else:
self._source = None # reset for tidiness
raise RuntimeError(errmsg)
return source
def Shift(self, nt):
"""Shift selected sids by nt timepoints"""
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
self.sort.shift(sids, nt)
print('Shifted %d spikes by %d timepoints' % (len(sids), nt))
unids = np.unique(spikes['nid'][sids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(sids)
# auto-refresh all plots
self.panel.updateAllItems()
def Align(self, to):
"""Align all implicitly selected spikes to min or max, or best fit
on selected chans"""
s = self.sort
spikes = s.spikes
spw = self.spykewindow
sids = np.concatenate((spw.GetClusterSpikes(), spw.GetUnsortedSpikes()))
if to == 'best':
tis = self.tis
# find which chans are common to all sids:
commonchans = s.get_common_chans(sids)[0]
# check selected chans
selchans = spw.get_selchans(sids)
for selchan in selchans:
if selchan not in commonchans:
print("Chan %d not common to all spikes, pick from %r"
% (selchan, list(commonchans)))
return
print('Best fit aligning %d spikes between tis=%r on chans=%r' %
(len(sids), list(tis), selchans))
# numpy implementation:
#dirtysids = s.alignbest(sids, tis, selchans)
# cython implementation:
dirtysids = util.alignbest_cy(s, sids, tis, np.int64(selchans))
else: # to in ['min', 'max']
print('Aligning %d spikes to %s' % (len(sids), to))
dirtysids = s.alignminmax(sids, to)
paligned = len(dirtysids) / len(sids) * 100
print('Aligned %d/%d (%.1f%%) spikes' % (len(dirtysids), len(sids), paligned))
unids = np.unique(spikes['nid'][dirtysids])
neurons = [ s.neurons[nid] for nid in unids ]
for neuron in neurons:
neuron.update_wave() # update affected mean waveforms
# add dirtysids to the set to be resaved to .wave file:
spw.update_dirtysids(dirtysids)
# auto-refresh all plots:
self.panel.updateAllItems()
def RemoveNeuron(self, neuron, update=True):
"""Remove neuron and all its spikes from the GUI and the Sort"""
self.MoveSpikes2List(neuron, neuron.sids, update=update)
self.sort.remove_neuron(neuron.id)
if update:
self.nlist.updateAll()
def MoveSpikes2Neuron(self, sids, neuron=None, update=True):
"""Assign spikes from sort.spikes to a neuron, and trigger eventual update of
mean wave. If neuron is None, create a new one"""
sids = toiter(sids)
spikes = self.sort.spikes
        if neuron is None:
neuron = self.sort.create_neuron()
neuron.sids = np.union1d(neuron.sids, sids) # update
spikes['nid'][sids] = neuron.id
if update:
self.sort.update_usids()
self.uslist.updateAll()
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # trigger nslist refresh
# TODO: selection doesn't seem to be working, always jumps to top of list
#self.uslist.Select(row) # automatically select the new item at that position
neuron.wave.data = None # trigger template mean update
return neuron
def MoveSpikes2List(self, neuron, sids, update=True):
"""Move spikes from a neuron back to the unsorted spike list control"""
sids = toiter(sids)
if len(sids) == 0:
return # nothing to do
spikes = self.sort.spikes
neuron.sids = np.setdiff1d(neuron.sids, sids) # return what's in 1st arr and not in 2nd
spikes['nid'][sids] = 0 # unbind neuron id of sids in spikes struct array
if update:
self.sort.update_usids()
self.uslist.updateAll()
# this only makes sense if the neuron is currently selected in the nlist:
if neuron in self.nslist.neurons:
self.nslist.neurons = self.nslist.neurons # this triggers a refresh
neuron.wave.data = None # triggers an update when it's actually needed
def PlotClusterHistogram(self, X, nids):
"""Plot histogram of given clusters along a single dimension. If two clusters are
given, project them onto axis connecting their centers, and calculate separation
indices between them. Otherwise, plot the distribution of all given clusters
(up to a limit) along the first dimension in X."""
spw = self.spykewindow
mplw = spw.OpenWindow('MPL')
unids = np.unique(nids) # each unid corresponds to a cluster, except possibly unid 0
nclusters = len(unids)
if nclusters == 0:
mplw.ax.clear()
mplw.figurecanvas.draw()
print("No spikes selected")
return
elif nclusters > 5: # to prevent slowdowns, don't plot too many
mplw.ax.clear()
mplw.figurecanvas.draw()
print("Too many clusters selected for cluster histogram")
return
elif nclusters == 2:
calc_measures = True
else:
calc_measures = False
projdimi = 0
ndims = X.shape[1]
points = [] # list of projection of each cluster's points onto dimi
for unid in unids:
sidis, = np.where(nids == unid)
# don't seem to need contig points for NDsepmetric, no need for copy:
points.append(X[sidis])
#points.append(np.ascontiguousarray(X[sidis]))
if calc_measures:
t0 = time.time()
NDsep = util.NDsepmetric(*points, Nmax=20000)
print('NDsep calc took %.3f sec' % (time.time()-t0))
# centers of both clusters, use median:
c0 = np.median(points[0], axis=0) # ndims vector
c1 = np.median(points[1], axis=0)
# line connecting the centers of the two clusters, wrt c0
line = c1-c0
line /= np.linalg.norm(line) # make it unit length
#print('c0=%r, c1=%r, line=%r' % (c0, c1, line))
else:
line = np.zeros(ndims)
line[projdimi] = 1.0 # pick out just the one component
c0 = 0.0 # set origin at 0
# calculate projection of each cluster's points onto line
projs = []
for cpoints in points:
projs.append(np.dot(cpoints-c0, line))
if calc_measures:
d = np.median(projs[1]) - np.median(projs[0])
# measure whether centers are at least 3 of the bigger stdevs away from
# each other:
maxstd = max(projs[0].std(), projs[1].std())
if maxstd == 0:
oneDsep = 0 # not sure if this is ideal
else:
oneDsep = d / (3 * maxstd)
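            # oneDsep reaches 1 when the projected medians are separated by at least 3 of the larger
            # of the two cluster stdevs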
#print('std0=%f, std1=%f, d=%f' % (projs[0].std(), projs[1].std(), d))
proj = np.concatenate(projs)
nbins = max(intround(np.sqrt(len(proj))), 2) # seems like a good heuristic
#print('nbins = %d' % nbins)
edges = np.histogram(proj, bins=nbins)[1]
hists = []
for i in range(nclusters):
hists.append(np.histogram(projs[i], bins=edges)[0])
        hist = np.vstack(hists) # one cluster hist per row
masses = np.asarray([ h.sum() for h in hist ])
sortedmassis = masses.argsort()
# Take the fraction of area that the two distribs overlap.
# At each bin, take min value of the two distribs. Add up all those min values,
# and divide by the mass of the smaller distrib.
if calc_measures:
overlaparearatio = hist.min(axis=0).sum() / masses[sortedmassis[0]]
djs = core.DJS(hists[0], hists[1])
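            # e.g. hists = [[5, 3, 0], [1, 2, 2]]: per-bin mins are [1, 2, 0] and sum to 3, the smaller
            # distrib has mass 5, so overlaparearatio = 3/5 = 0.6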
# plotting:
ledges = edges[:-1] # keep just the left edges, discard the last right edge
assert len(ledges) == nbins
binwidth = ledges[1] - ledges[0]
# plot:
a = mplw.ax
a.clear()
windowtitle = "clusters %r" % list(unids)
print(windowtitle)
mplw.setWindowTitle(windowtitle)
if calc_measures:
#title = ("sep index=%.3f, overlap area ratio=%.3f, DJS=%.3f, sqrt(DJS)=%.3f"
# % (oneDsep, overlaparearatio, djs, np.sqrt(djs)))
title = ("%dDsep=%.3f, 1Dsep=%.3f, OAR=%.3f, DJS=%.3f"
% (ndims, NDsep, oneDsep, overlaparearatio, djs))
print(title)
a.set_title(title)
cs = [ CLUSTERCOLOURDICT[unid] for unid in unids ]
for i, c in enumerate(cs):
# due to white background, replace white clusters with black:
if c == WHITE:
cs[i] = 'black'
# plot the smaller cluster last, to maximize visibility:
for i in sortedmassis[::-1]:
a.bar(ledges, hist[i], width=binwidth, color=cs[i], edgecolor=cs[i])
## TODO: tight_layout call needs updating for MPL 2.2:
#mplw.f.tight_layout(pad=0.3) # crop figure to contents
mplw.figurecanvas.draw()
| 47.437693 | 98 | 0.600495 |
f71a7085403e8ce0a19e0672e598aeec15a4a023 | 899 | py | Python | examples/show_debug.py | Matuiss2/python-sc2 | dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953 | [
"MIT"
] | 2 | 2019-01-23T19:11:53.000Z | 2019-04-05T17:45:49.000Z | examples/show_debug.py | Matuiss2/python-sc2 | dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953 | [
"MIT"
] | null | null | null | examples/show_debug.py | Matuiss2/python-sc2 | dd93215d8b09b7ddacfd5c3cc4e9f43641d3f953 | [
"MIT"
] | 1 | 2019-04-24T13:31:20.000Z | 2019-04-24T13:31:20.000Z | import sc2
from sc2 import run_game, maps, Race, Difficulty
from sc2.player import Bot, Computer
class MyBot(sc2.BotAI):
async def on_step(self, iteration):
for structure in self.structures:
self._client.debug_text_world(
"\n".join([
f"{structure.type_id.name}:{structure.type_id.value}",
f"({structure.position.x:.2f},{structure.position.y:.2f})",
f"{structure.build_progress:.2f}",
] + [repr(x) for x in structure.orders]),
structure.position3d,
color=(0, 255, 0),
size=12,
)
await self._client.send_debug()
def main():
run_game(maps.get("Abyssal Reef LE"), [
Bot(Race.Terran, MyBot()),
Computer(Race.Protoss, Difficulty.Medium)
], realtime=True)
if __name__ == '__main__':
main()
| 31 | 79 | 0.558398 |
f71a71c02c39541a49fbe5ad95d204ca99999495 | 1,129 | py | Python | migrations/versions/0076_add_intl_flag_to_provider.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 41 | 2019-11-28T16:58:41.000Z | 2022-01-28T21:11:16.000Z | migrations/versions/0076_add_intl_flag_to_provider.py | cds-snc/notification-api | b1c1064f291eb860b494c3fa65ac256ad70bf47c | [
"MIT"
] | 1,083 | 2019-07-08T12:57:24.000Z | 2022-03-08T18:53:40.000Z | migrations/versions/0076_add_intl_flag_to_provider.py | cds-snc/notifier-api | 90b385ec49efbaee7e607516fc7d9f08991af813 | [
"MIT"
] | 9 | 2020-01-24T19:56:43.000Z | 2022-01-27T21:36:53.000Z | """empty message
Revision ID: 0076_add_intl_flag_to_provider
Revises: 0075_create_rates_table
Create Date: 2017-04-25 09:44:13.194164
"""
# revision identifiers, used by Alembic.
revision = "0076_add_intl_flag_to_provider"
down_revision = "0075_create_rates_table"
import sqlalchemy as sa
from alembic import op
def upgrade():
op.add_column(
"provider_details",
sa.Column(
"supports_international",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
op.add_column(
"provider_details_history",
sa.Column(
"supports_international",
sa.Boolean(),
nullable=False,
server_default=sa.false(),
),
)
op.execute("UPDATE provider_details SET supports_international=True WHERE identifier='mmg'")
op.execute("UPDATE provider_details_history SET supports_international=True WHERE identifier='mmg'")
def downgrade():
op.drop_column("provider_details_history", "supports_international")
op.drop_column("provider_details", "supports_international")
| 25.659091 | 104 | 0.675819 |
f71aa357327a98795cb190e3909dda5f261e7b6a | 25,206 | py | Python | acore/classifier_cov_pow_toy_pvalue.py | zhao-david/ACORE-LFI | 91de88b77f0be110e42ed91bbb7a50b7ca83319a | [
"MIT"
] | null | null | null | acore/classifier_cov_pow_toy_pvalue.py | zhao-david/ACORE-LFI | 91de88b77f0be110e42ed91bbb7a50b7ca83319a | [
"MIT"
] | null | null | null | acore/classifier_cov_pow_toy_pvalue.py | zhao-david/ACORE-LFI | 91de88b77f0be110e42ed91bbb7a50b7ca83319a | [
"MIT"
] | null | null | null | from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import argparse
import pandas as pd
from tqdm.auto import tqdm
from datetime import datetime
from sklearn.metrics import log_loss
import seaborn as sns
import matplotlib.pyplot as plt
from utils.functions import train_clf, compute_statistics_single_t0, clf_prob_value, compute_bayesfactor_single_t0, \
odds_ratio_loss, train_pvalue_clf
from models.toy_poisson import ToyPoissonLoader
from models.toy_gmm import ToyGMMLoader
from models.toy_gamma import ToyGammaLoader
from or_classifiers.toy_example_list import classifier_dict, classifier_dict_mlpcomp, classifier_pvalue_dict
model_dict = {
'poisson': ToyPoissonLoader,
'gmm': ToyGMMLoader,
'gamma': ToyGammaLoader
}
def main(run, rep, b, b_prime, alpha, t0_val, sample_size_obs, test_statistic, mlp_comp=False,
monte_carlo_samples=500, debug=False, seed=7, size_check=1000, verbose=False, marginal=False,
size_marginal=1000, guided_sim=False, guided_sample=1000, empirical_marginal=True):
# Changing values if debugging
b = b if not debug else 100
b_prime = b_prime if not debug else 100
size_check = size_check if not debug else 100
rep = rep if not debug else 2
model_obj = model_dict[run](marginal=marginal, size_marginal=size_marginal, empirical_marginal=empirical_marginal)
classifier_dict_run = classifier_dict_mlpcomp if mlp_comp else classifier_dict
# Get the correct functions
msnh_sampling_func = model_obj.sample_msnh_algo5
grid_param = model_obj.grid
gen_obs_func = model_obj.sample_sim
gen_sample_func = model_obj.generate_sample
gen_param_fun = model_obj.sample_param_values
t0_grid = model_obj.pred_grid
tp_func = model_obj.compute_exact_prob
# Creating sample to check entropy about
np.random.seed(seed)
sample_check = gen_sample_func(sample_size=size_check, marginal=marginal)
theta_vec = sample_check[:, :model_obj.d]
x_vec = sample_check[:, (model_obj.d + 1):]
bern_vec = sample_check[:, model_obj.d]
true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)
entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1
else np.log(1 - true_prob_vec[kk])
for kk, el in enumerate(bern_vec)])
# Loop over repetitions and classifiers
# Each time we train the different classifiers, we build the intervals and we record
# whether the point is in or not.
out_val = []
out_cols = ['test_statistic', 'b_prime', 'b', 'classifier', 'classifier_pvalue', 'run', 'rep', 'sample_size_obs',
'cross_entropy_loss', 'cross_entropy_loss_pvalue', 't0_true_val', 'theta_0_current', 'on_true_t0',
'estimated_pvalue', 'in_confint', 'out_confint', 'size_CI', 'true_entropy', 'or_loss_value',
'monte_carlo_samples', 'guided_sim', 'empirical_marginal', 'guided_sample']
pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s, b=%s' % (sample_size_obs, b))
rep_counter = 0
not_update_flag = False
while rep_counter < rep:
# Generates samples for each t0 values, so to be able to check both coverage and power
x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)
# Train the classifier for the odds
clf_odds_fitted = {}
clf_pvalue_fitted = {}
for clf_name, clf_model in sorted(classifier_dict_run.items(), key=lambda x: x[0]):
clf_odds = train_clf(sample_size=b, clf_model=clf_model, gen_function=gen_sample_func,
clf_name=clf_name, nn_square_root=True)
if verbose:
print('----- %s Trained' % clf_name)
if test_statistic == 'acore':
tau_obs = np.array([
compute_statistics_single_t0(
clf=clf_odds, obs_sample=x_obs, t0=theta_0, grid_param_t1=grid_param,
d=model_obj.d, d_obs=model_obj.d_obs) for theta_0 in t0_grid])
elif test_statistic == 'avgacore':
tau_obs = np.array([
compute_bayesfactor_single_t0(
clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,
d=model_obj.d, d_obs=model_obj.d_obs, log_out=False) for theta_0 in t0_grid])
elif test_statistic == 'logavgacore':
tau_obs = np.array([
compute_bayesfactor_single_t0(
clf=clf_odds, obs_sample=x_obs, t0=theta_0, gen_param_fun=gen_param_fun,
d=model_obj.d, d_obs=model_obj.d_obs, log_out=True) for theta_0 in t0_grid])
else:
raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'
' Currently %s' % test_statistic)
# Calculating cross-entropy
est_prob_vec = clf_prob_value(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec, d=model_obj.d,
d_obs=model_obj.d_obs)
loss_value = log_loss(y_true=bern_vec, y_pred=est_prob_vec)
# Calculating or loss
or_loss_value = odds_ratio_loss(clf=clf_odds, x_vec=x_vec, theta_vec=theta_vec,
bern_vec=bern_vec, d=1, d_obs=1)
clf_odds_fitted[clf_name] = (tau_obs, loss_value, or_loss_value)
# Train the P-value regression algorithm for confidence levels
if guided_sim:
                # Sample a set of thetas from the parameter budget (of size guided_sample), resample them
                # according to the odds-based statistic values, fit a Gaussian to that resample, and then
                # sample the B' datasets from parameters drawn from the fitted Gaussian.
theta_mat_sample = gen_param_fun(sample_size=guided_sample)
if test_statistic == 'acore':
stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,
func1d=lambda row: compute_statistics_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row,
grid_param_t1=grid_param,
d=model_obj.d,
d_obs=model_obj.d_obs
))
elif test_statistic == 'avgacore':
stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row,
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples
))
elif test_statistic == 'logavgacore':
stats_sample = np.apply_along_axis(arr=theta_mat_sample.reshape(-1, 1), axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row,
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples,
log_out=True
))
else:
raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'
' Currently %s' % test_statistic)
                # If the statistics are on the log scale, some of the values might be negative, so exponentiate
                # them to make sure that large negative numbers are counted correctly (i.e. as very low
                # probabilities, not as weights with large magnitudes).
if test_statistic in ['acore', 'logavgacore']:
stats_sample = np.exp(stats_sample)
stats_sample = stats_sample/np.sum(stats_sample)
theta_mat_gaussian_fit = np.random.choice(a=theta_mat_sample, p=stats_sample.reshape(-1, ),
size=guided_sample)
                # use the fitted std unless the resample is degenerate (zero variance); fall back to 1.0 in that case
                std_gaussian_fit = np.std(theta_mat_gaussian_fit) if np.std(theta_mat_gaussian_fit) > 0.0 else 1.0
theta_mat = np.clip(
a=np.random.normal(size=b_prime, loc=np.mean(theta_mat_gaussian_fit),
scale=std_gaussian_fit),
a_min=model_obj.low_int, a_max=model_obj.high_int)
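                # guided proposal: thetas are drawn from a Gaussian fitted to the statistic-weighted resample
                # and clipped to the parameter range, so the B' simulations concentrate where the test
                # statistic puts the most mass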
sample_mat = np.apply_along_axis(arr=theta_mat.reshape(-1, 1), axis=1,
func1d=lambda row: gen_obs_func(sample_size=sample_size_obs,
true_param=row))
else:
# Generate a matrix with values for both the sampled thetas as the actual samples
theta_mat, sample_mat = msnh_sampling_func(b_prime=b_prime, sample_size=sample_size_obs)
full_mat = np.hstack((theta_mat.reshape(-1, 1), sample_mat))
if test_statistic == 'acore':
stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_statistics_single_t0(
clf=clf_odds,
obs_sample=row[model_obj.d:],
t0=row[:model_obj.d],
grid_param_t1=grid_param,
d=model_obj.d,
d_obs=model_obj.d_obs
))
stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_statistics_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row[:model_obj.d],
grid_param_t1=grid_param,
d=model_obj.d,
d_obs=model_obj.d_obs
))
elif test_statistic == 'avgacore':
stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=row[model_obj.d:],
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples
))
stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples
))
elif test_statistic == 'logavgacore':
stats_mat_generated = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=row[model_obj.d:],
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples,
log_out=True
))
stats_mat_observed = np.apply_along_axis(arr=full_mat, axis=1,
func1d=lambda row: compute_bayesfactor_single_t0(
clf=clf_odds,
obs_sample=x_obs,
t0=row[:model_obj.d],
gen_param_fun=gen_param_fun,
d=model_obj.d,
d_obs=model_obj.d_obs,
monte_carlo_samples=monte_carlo_samples,
log_out=True
))
else:
raise ValueError('The variable test_statistic needs to be either acore, avgacore, logavgacore.'
' Currently %s' % test_statistic)
if np.any(np.isnan(stats_mat_generated)) or not np.all(np.isfinite(stats_mat_generated)) or \
np.any(np.isnan(stats_mat_observed)) or not np.all(np.isfinite(stats_mat_observed)):
not_update_flag = True
break
# Comparing the two vectors of values
clf_pvalue_fitted[clf_name] = {}
indicator_vec = np.greater(stats_mat_observed, stats_mat_generated).astype(int)
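            # indicator is 1 where the observed statistic exceeds the simulated one at the same theta;
            # regressing it on theta estimates P(T_sim < T_obs | theta), which is used as the p-value surface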
for clf_name_pvalue, clf_model_pvalue in sorted(classifier_pvalue_dict.items(), key=lambda x: x[0]):
                # If indicator_vec is (almost) all 0s or all 1s, do not fit a classifier (sklearn would throw
                # an error); just return the observed class proportion instead.
if sum(indicator_vec) <= 1 or sum(indicator_vec) >= len(indicator_vec) - 1:
pval_pred = np.repeat(sum(indicator_vec) / len(indicator_vec), b_prime)
loss_value_pval = np.nan
else:
clf_pvalue = train_pvalue_clf(clf_model=clf_model_pvalue, X=theta_mat.reshape(-1, model_obj.d),
y=indicator_vec.reshape(-1, ), clf_name=clf_name_pvalue,
nn_square_root=True)
pval_pred = clf_pvalue.predict_proba(t0_grid.reshape(-1, model_obj.d))[:, 1]
theta_mat_pred = clf_pvalue.predict_proba(theta_mat.reshape(-1, model_obj.d))[:, 1]
loss_value_pval = log_loss(y_true=indicator_vec, y_pred=theta_mat_pred)
clf_pvalue_fitted[clf_name][clf_name_pvalue] = (pval_pred, loss_value_pval)
# If there were some problems in calculating the statistics, get out of the loop
if not_update_flag:
not_update_flag = False
continue
        # At this point, all that's left is to record the results
for clf_name, (tau_obs_val, cross_ent_loss, or_loss_value) in clf_odds_fitted.items():
for clf_name_qr, (pvalue_val, pvalue_celoss_val) in clf_pvalue_fitted[clf_name].items():
size_temp = np.mean((pvalue_val > alpha).astype(int))
for kk, theta_0_current in enumerate(t0_grid):
out_val.append([
test_statistic, b_prime, b, clf_name, clf_name_qr, run, rep_counter, sample_size_obs,
cross_ent_loss, pvalue_celoss_val, t0_val, theta_0_current, int(t0_val == theta_0_current),
pvalue_val[kk], int(pvalue_val[kk] > alpha),
int(pvalue_val[kk] <= alpha), size_temp, entropy_est, or_loss_value,
monte_carlo_samples, int(guided_sim), int(empirical_marginal), guided_sample
])
pbar.update(1)
rep_counter += 1
# Saving the results
out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)
out_dir = 'sims/classifier_cov_pow_toy/'
out_filename = 'classifier_reps_cov_pow_toy_pvalues_%steststats_%s_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s%s_%s.csv' % (
test_statistic, 'mlp_comp' if mlp_comp else 'toyclassifiers', b, b_prime, run, rep,
str(alpha).replace('.', '-'), sample_size_obs,
str(t0_val).replace('.', '-'),
'_empirmarg' if empirical_marginal else '',
datetime.strftime(datetime.today(), '%Y-%m-%d-%H-%M')
)
out_df.to_csv(out_dir + out_filename)
# Print results
cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'classifier_pvalue', 'in_confint',
'cross_entropy_loss', 'cross_entropy_loss_pvalue', 'size_CI']]
print(cov_df.groupby(['classifier', 'classifier_pvalue']).agg({'in_confint': [np.average],
'size_CI': [np.average, np.std],
'cross_entropy_loss': [np.average],
'cross_entropy_loss_pvalue': [np.average]}))
# Power plots
    out_df['class_combo'] = out_df[['classifier', 'classifier_pvalue']].apply(lambda x: x[0] + '---' + x[1], axis=1)
plot_df = out_df[['class_combo', 'theta_0_current', 'out_confint']].groupby(
['class_combo', 'theta_0_current']).mean().reset_index()
fig = plt.figure(figsize=(20, 10))
sns.lineplot(x='theta_0_current', y='out_confint', hue='class_combo', data=plot_df, palette='cubehelix')
plt.legend(loc='best', fontsize=25)
plt.xlabel(r'$\theta$', fontsize=25)
plt.ylabel('Power', fontsize=25)
plt.title("Power of Hypothesis Test, B=%s, B'=%s, n=%s, %s" % (
b, b_prime, sample_size_obs, run.title()), fontsize=25)
out_dir = 'images/classifier_cov_pow_toy/'
outfile_name = 'power_classifier_reps_pvalue_%steststats_%sB_%sBprime_%s_%srep_alpha%s_sampleobs%s_t0val%s_%s.pdf' % (
test_statistic, b, b_prime, run, rep, str(alpha).replace('.', '-'), sample_size_obs,
str(t0_val).replace('.', '-'),
datetime.strftime(datetime.today(), '%Y-%m-%d')
)
plt.tight_layout()
plt.savefig(out_dir + outfile_name)
plt.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--seed', action="store", type=int, default=7,
help='Random State')
parser.add_argument('--rep', action="store", type=int, default=10,
help='Number of Repetitions for calculating the Pinball loss')
parser.add_argument('--b', action="store", type=int, default=5000,
help='Sample size to train the classifier for calculating odds')
parser.add_argument('--b_prime', action="store", type=int, default=1000,
help='Sample size to train the quantile regression algorithm')
parser.add_argument('--marginal', action='store_true', default=False,
                        help='Whether we are using a parametric approximation of the marginal or '
'the baseline reference G')
parser.add_argument('--alpha', action="store", type=float, default=0.1,
help='Statistical confidence level')
parser.add_argument('--run', action="store", type=str, default='poisson',
help='Problem to run')
parser.add_argument('--debug', action='store_true', default=False,
                        help='If true, a very small value for the sample sizes is fit to make sure the '
'file can run quickly for debugging purposes')
parser.add_argument('--verbose', action='store_true', default=False,
help='If true, logs are printed to the terminal')
parser.add_argument('--sample_size_obs', action="store", type=int, default=10,
help='Sample size of the actual observed data.')
parser.add_argument('--t0_val', action="store", type=float, default=10.0,
help='True parameter which generates the observed dataset')
parser.add_argument('--size_marginal', action="store", type=int, default=1000,
help='Sample size of the actual marginal distribution, if marginal is True.')
parser.add_argument('--monte_carlo_samples', action="store", type=int, default=500,
help='Sample size for the calculation of the avgacore and logavgacore statistic.')
parser.add_argument('--test_statistic', action="store", type=str, default='acore',
help='Test statistic to compute confidence intervals. Can be acore|avgacore|logavgacore')
parser.add_argument('--mlp_comp', action='store_true', default=False,
help='If true, we compare different MLP training algorithm.')
parser.add_argument('--empirical_marginal', action='store_true', default=False,
help='Whether we are sampling directly from the empirical marginal for G')
parser.add_argument('--guided_sim', action='store_true', default=False,
                        help='If true, we guide the sampling for the B prime in order to get meaningful results.')
parser.add_argument('--guided_sample', action="store", type=int, default=2500,
help='The sample size to be used for the guided simulation. Only used if guided_sim is True.')
argument_parsed = parser.parse_args()
# b_vec = [100, 500, 1000]
# for b_val in b_vec:
main(
run=argument_parsed.run,
rep=argument_parsed.rep,
marginal=argument_parsed.marginal,
b=argument_parsed.b,
b_prime=argument_parsed.b_prime,
alpha=argument_parsed.alpha,
debug=argument_parsed.debug,
sample_size_obs=argument_parsed.sample_size_obs,
t0_val=argument_parsed.t0_val,
seed=argument_parsed.seed,
verbose=argument_parsed.verbose,
size_marginal=argument_parsed.size_marginal,
monte_carlo_samples=argument_parsed.monte_carlo_samples,
test_statistic=argument_parsed.test_statistic,
mlp_comp=argument_parsed.mlp_comp,
empirical_marginal=argument_parsed.empirical_marginal,
guided_sim=argument_parsed.guided_sim,
guided_sample=argument_parsed.guided_sample
)
| 63.491184 | 134 | 0.519281 |
f71aa8d7c382bafc56b06793ddb3976f1a195ca1 | 11,480 | py | Python | StructVBERT/tasks/vqa.py | onlyrico/AliceMind | a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded | [
"Apache-2.0"
] | 1 | 2021-08-05T05:41:50.000Z | 2021-08-05T05:41:50.000Z | StructVBERT/tasks/vqa.py | onlyrico/AliceMind | a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded | [
"Apache-2.0"
] | null | null | null | StructVBERT/tasks/vqa.py | onlyrico/AliceMind | a6a070b1610e4c4bfe84ee6c4195b2bc4f725ded | [
"Apache-2.0"
] | 1 | 2021-07-10T09:50:47.000Z | 2021-07-10T09:50:47.000Z | # coding=utf-8
# Copyleft 2019 project LXRT.
import os
import collections
import torch
import torch.nn as nn
import logging
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from param import args
from lxrt.qa_answer_table import load_lxmert_qa
from tasks.vqa_model import VQAModel
from tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def get_data_tuple(splits: str, bs:int, shuffle=False, drop_last=False) -> DataTuple:
dset = VQADataset(splits)
tset = VQATorchDataset(dset)
evaluator = VQAEvaluator(dset)
data_loader = DataLoader(
tset, batch_size=bs,
shuffle=shuffle, num_workers=args.num_workers,
drop_last=drop_last, pin_memory=True
)
return DataTuple(dataset=dset, loader=data_loader, evaluator=evaluator)
class WarmupOptimizer(object):
def __init__(self, _lr_base, optimizer, _data_size, _batch_size):
self.optimizer = optimizer
self._step = 0
self._lr_base = _lr_base
self._rate = 0
self._data_size = _data_size
self._batch_size = _batch_size
def step(self):
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def zero_grad(self):
self.optimizer.zero_grad()
def rate(self, step=None):
if step is None:
step = self._step
if step <= int(self._data_size / self._batch_size * 1):
r = self._lr_base * 1/4.
elif step <= int(self._data_size / self._batch_size * 2):
r = self._lr_base * 2/4.
elif step <= int(self._data_size / self._batch_size * 3):
r = self._lr_base * 3/4.
else:
r = self._lr_base
return r
def adjust_learning_rate(optimizer, decay_rate):
optimizer._lr_base *= decay_rate
class VQA:
def __init__(self):
# Datasets
self.train_tuple = get_data_tuple(
args.train, bs=args.batch_size, shuffle=True, drop_last=True
)
if args.valid != "":
self.valid_tuple = get_data_tuple(
args.valid, bs=256, # for large model
shuffle=False, drop_last=False
)
else:
self.valid_tuple = None
# Model
self.model = VQAModel(self.train_tuple.dataset.num_answers)
self._lr_decay_epoch_list = [8, 10]
self._lr_decay_rate = 0.2
# Load pre-trained weights
if args.load_lxmert is not None:
self.model.lxrt_encoder.load(args.load_lxmert)
if args.load_lxmert_qa is not None:
load_lxmert_qa(args.load_lxmert_qa, self.model,
label2ans=self.train_tuple.dataset.label2ans)
if args.fix_language_bert:
assert args.patial_load
state_dict = torch.load(args.patial_load)
for k in state_dict.copy():
if not k.startswith('bert.'):
state_dict['bert.' + k.replace('gamma', 'weight').replace('beta', 'bias')] = state_dict.pop(k)
# fix bert parameters
for name, param in self.model.lxrt_encoder.model.named_parameters():
# if 'pooler' in name: # pooler not fixed
# continue
if name in state_dict:
logger.info('fix param for: {}'.format(name))
param.requires_grad = False
# GPU options
self.model = self.model.cuda()
# Loss and Optimizer
self.bce_loss = nn.BCEWithLogitsLoss()
if 'bert' in args.optim:
batch_per_epoch = len(self.train_tuple.loader)
t_total = int(batch_per_epoch * args.epochs)
logger.info("BertAdam Total Iters: %d" % t_total)
from lxrt.optimization import BertAdam
self.optim = BertAdam(list(self.model.parameters()),
lr=args.lr,
warmup=0.1,
t_total=t_total)
elif 'adam' in args.optim:
batch_per_epoch = len(self.train_tuple.loader)
optim = args.optimizer(filter(lambda p: p.requires_grad, self.model.parameters()), lr=0, betas=(0.9, 0.98), eps=1e-9)
self.optim = WarmupOptimizer(args.lr, optim, batch_per_epoch * args.batch_size, args.batch_size)
else:
self.optim = args.optimizer(self.model.parameters(), args.lr)
if args.amp_type is not None:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
self.model, self.optim = amp.initialize(self.model, self.optim, opt_level=args.amp_type)
if args.multiGPU:
self.model.lxrt_encoder.multi_gpu()
# Output Directory
self.output = args.output
os.makedirs(self.output, exist_ok=True)
def train(self, train_tuple, eval_tuple):
dset, loader, evaluator = train_tuple
iter_wrapper = (lambda x: tqdm(x, total=len(loader))) if args.tqdm else (lambda x: x)
best_valid = 0.
for epoch in range(args.epochs):
quesid2ans = {}
if 'adam' in args.optim and epoch in self._lr_decay_epoch_list:
adjust_learning_rate(self.optim, self._lr_decay_rate)
for i, (ques_id, feats, boxes, sent, target) in iter_wrapper(enumerate(loader)):
self.model.train()
self.optim.zero_grad()
feats, boxes, target = feats.cuda(), boxes.cuda(), target.cuda()
logit = self.model(feats, boxes, sent)
assert logit.dim() == target.dim() == 2
loss = self.bce_loss(logit, target)
loss = loss * logit.size(1)
if args.multiGPU:
loss = loss.mean() # mean() to average on multi-gpu.
if args.amp_type is not None:
from apex import amp
with amp.scale_loss(loss, self.optim) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), args.clip_norm)
self.optim.step()
score, label = logit.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
log_str = "\nEpoch %d: Train %0.2f\n" % (epoch, evaluator.evaluate(quesid2ans) * 100.)
if self.valid_tuple is not None: # Do Validation
valid_score = self.evaluate(eval_tuple)
if valid_score > best_valid:
best_valid = valid_score
self.save("BEST")
log_str += "Epoch %d: Valid %0.2f\n" % (epoch, valid_score * 100.) + \
"Epoch %d: Best %0.2f\n" % (epoch, best_valid * 100.)
logger.info(log_str)
with open(self.output + "/log.log", 'a') as f:
f.write(log_str)
f.flush()
self.save("LAST")
def predict(self, eval_tuple: DataTuple, dump=None):
"""
Predict the answers to questions in a data split.
:param eval_tuple: The data tuple to be evaluated.
:param dump: The path of saved file to dump results.
:return: A dict of question_id to answer.
"""
self.model.eval()
dset, loader, evaluator = eval_tuple
quesid2ans = {}
for i, datum_tuple in enumerate(loader):
ques_id, feats, boxes, sent = datum_tuple[:4] # Avoid seeing ground truth
with torch.no_grad():
feats, boxes = feats.cuda(), boxes.cuda()
logit = self.model(feats, boxes, sent)
if args.with_score:
logit = nn.Softmax(dim=1)(logit)
score, label = logit.max(1)
if args.with_score:
for qid, l, s in zip(ques_id, label.cpu().numpy(), score.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = (ans, str(s))
else:
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
if dump is not None:
evaluator.dump_result(quesid2ans, dump)
return quesid2ans
def evaluate(self, eval_tuple: DataTuple, dump=None):
"""Evaluate all data in data_tuple."""
quesid2ans = self.predict(eval_tuple, dump)
return eval_tuple.evaluator.evaluate(quesid2ans)
@staticmethod
def oracle_score(data_tuple):
dset, loader, evaluator = data_tuple
quesid2ans = {}
for i, (ques_id, feats, boxes, sent, target) in enumerate(loader):
_, label = target.max(1)
for qid, l in zip(ques_id, label.cpu().numpy()):
ans = dset.label2ans[l]
quesid2ans[qid.item()] = ans
return evaluator.evaluate(quesid2ans)
def save(self, name):
torch.save(self.model.state_dict(),
os.path.join(self.output, "%s.pth" % name))
def load(self, path):
logger.info("Load model from %s" % path)
state_dict = torch.load("%s.pth" % path)
self.model.load_state_dict(state_dict)
if __name__ == "__main__":
# Build Class
vqa = VQA()
# Load VQA model weights
if args.load is not None:
vqa.load(args.load)
# Test or Train
if args.test is not None:
args.fast = args.tiny = False # Always loading all data in test
if 'test' in args.test:
vqa.predict(
get_data_tuple(args.test, bs=950,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'test_predict.json')
)
elif 'val' in args.test:
        # Since part of the validation data is used in pre-training/fine-tuning,
# only validate on the minival set.
result = vqa.evaluate(
get_data_tuple('minival', bs=950,
shuffle=False, drop_last=False),
dump=os.path.join(args.output, 'minival_predict.json')
)
logger.info(result)
else:
assert False, "No such test option for %s" % args.test
else:
# print('Splits in Train data:', vqa.train_tuple.dataset.splits)
logger.info('Splits in Train data: {}'.format(vqa.train_tuple.dataset.splits))
if vqa.valid_tuple is not None:
logger.info('Splits in Valid data: {}'.format(vqa.valid_tuple.dataset.splits))
logger.info("Valid Oracle: %0.2f" % (vqa.oracle_score(vqa.valid_tuple) * 100))
else:
logger.info("DO NOT USE VALIDATION")
vqa.train(vqa.train_tuple, vqa.valid_tuple)
| 38.394649 | 129 | 0.567334 |
f71aa988a5098b28bbada6d39c5173f2c7f1034c | 1,683 | py | Python | python/ctci/1_arrays_strings/6_Compression.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
] | null | null | null | python/ctci/1_arrays_strings/6_Compression.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
] | null | null | null | python/ctci/1_arrays_strings/6_Compression.py | othonreyes/code_problems | 6e65b26120b0b9d6e5ac7342a4d964696b7bd5bf | [
"MIT"
] | null | null | null | # Create a function that implements a basic compression algorithm by counting the chars
# thtat are present in a string, if the result string is longer than input
# then return original input.
#
# Examples:
# aaabcccccaaa: a3b1c5a3
# abcdef: abcdef
# aaaaaaaaaaba: a10b1a1
### Note: Don't use extra space
import unittest
from collections import Counter
def compress2(s1):
newStr = []
count = 0
for i in range(len(s1)):
# Explanation
# the i != 0 is used to deal with the first character.
    # we could have done the following instead, but it requires extra code:
    # char = s1[0]  # requires checking that s1 is not empty
    # - or -
    # char = ''  # requires checking whether char != ''
if i != 0 and s1[i] != s1[i-1]:
newStr.append(s1[i-1] + str(count))
count = 0
count += 1
newStr.append(s1[-1] + str(count)) # we do this to deal with the last characters
return min(s1, ''.join(newStr), key=len)
def compress(s1):
newStr = ''
char = ''
count = 0
for i in range(len(s1)):
if char != s1[i]:
if char != '': # we do this to deal with the initial case
newStr += char + str(count)
char = s1[i]
count = 1
else:
count += 1
newStr += char + str(count) # we do this to deal with the last characters
if len(newStr) > len(s1):
return s1
return newStr
class Test(unittest.TestCase):
valid = (
('aaabcccccaaa', 'a3b1c5a3'),
('abcdef', 'abcdef'),
('aaaaaaaaaaba', 'a10b1a1')
)
def test(self):
for [input, expected] in self.valid:
print(input,' vs ',expected)
result = compress(input)
self.assertEqual(result, expected)
if __name__ == "__main__":
unittest.main() | 25.892308 | 87 | 0.618538 |
f71aad03581521af34e46f4263fc80abdb4a99c3 | 6,135 | py | Python | asposewordscloud/models/requests/insert_list_online_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 14 | 2018-07-15T17:01:52.000Z | 2018-11-29T06:15:33.000Z | asposewordscloud/models/requests/insert_list_online_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 1 | 2018-09-28T12:59:34.000Z | 2019-10-08T08:42:59.000Z | asposewordscloud/models/requests/insert_list_online_request.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 2 | 2020-12-21T07:59:17.000Z | 2022-02-16T21:41:25.000Z | # coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="insert_list_online_request.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import json
from six.moves.urllib.parse import quote
from asposewordscloud import *
from asposewordscloud.models import *
from asposewordscloud.models.requests import *
from asposewordscloud.models.responses import *
class InsertListOnlineRequest(BaseRequestObject):
"""
Request model for insert_list_online operation.
Initializes a new instance.
:param document The document.
:param list_insert List object.
:param load_encoding Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
:param password Password for opening an encrypted document.
    :param dest_file_name Result path of the document after the operation. If this parameter is omitted then the result of the operation will be saved as the source document.
    :param revision_author Initials of the author to use for revisions. If you set this parameter and then make some changes to the document programmatically, save the document and later open the document in MS Word you will see these changes as revisions.
:param revision_date_time The date and time to use for revisions.
"""
def __init__(self, document, list_insert, load_encoding=None, password=None, dest_file_name=None, revision_author=None, revision_date_time=None):
self.document = document
self.list_insert = list_insert
self.load_encoding = load_encoding
self.password = password
self.dest_file_name = dest_file_name
self.revision_author = revision_author
self.revision_date_time = revision_date_time
def create_http_request(self, api_client):
# verify the required parameter 'document' is set
if self.document is None:
raise ValueError("Missing the required parameter `document` when calling `insert_list_online`") # noqa: E501
# verify the required parameter 'list_insert' is set
if self.list_insert is None:
raise ValueError("Missing the required parameter `list_insert` when calling `insert_list_online`") # noqa: E501
path = '/v4.0/words/online/post/lists'
path_params = {}
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.load_encoding is not None:
query_params.append(('loadEncoding', self.load_encoding)) # noqa: E501
if self.password is not None:
query_params.append(('password', self.password)) # noqa: E501
if self.dest_file_name is not None:
query_params.append(('destFileName', self.dest_file_name)) # noqa: E501
if self.revision_author is not None:
query_params.append(('revisionAuthor', self.revision_author)) # noqa: E501
if self.revision_date_time is not None:
query_params.append(('revisionDateTime', self.revision_date_time)) # noqa: E501
header_params = {}
# HTTP header `Content-Type`
header_params['Content-Type'] = api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
form_params = []
if self.document is not None:
form_params.append(['document', self.document, 'file']) # noqa: E501
if self.list_insert is not None:
form_params.append(['listInsert', self.list_insert.to_json(), 'string']) # noqa: E501
body_params = None
return {
"method": "PUT",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": 'InsertListOnlineResponse' # noqa: E501
}
def get_response_type(self):
return 'InsertListOnlineResponse' # noqa: E501
def deserialize_response(self, api_client, response):
multipart = self.getparts(response)
return InsertListOnlineResponse(
self.deserialize(json.loads(multipart[0].text), ListResponse, api_client),
self.deserialize_file(multipart[1].content, multipart[1].headers, api_client))
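# Illustrative usage sketch (not part of the original file). It assumes a configured
# asposewordscloud.WordsApi instance named `words_api`, an asposewordscloud.ListInsert
# model instance named `list_insert`, and that the matching API method is named
# `insert_list_online`, mirroring the request class name:
#
#   with open('test_doc.docx', 'rb') as document:
#       request = InsertListOnlineRequest(document=document, list_insert=list_insert)
#       result = words_api.insert_list_online(request)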
| 49.08 | 255 | 0.669927 |
f71ad0e03a1f64c0b8808cee586a271e9c91b997 | 950 | py | Python | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 966 | 2017-04-18T04:14:04.000Z | 2022-03-03T21:22:44.000Z | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 48 | 2017-05-02T23:51:29.000Z | 2021-12-06T19:10:11.000Z | clif/testing/python/non_raising_test.py | wangxf123456/clif | 9bff8a28f5d266d6ea4f4bb0dc1d9c9a0c9ee5b1 | [
"Apache-2.0"
] | 135 | 2017-04-26T06:15:30.000Z | 2022-01-07T02:17:20.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from clif.testing.python import non_raising
class NonRaisingTest(absltest.TestCase):
def testPlain(self):
num = non_raising.MakeTestNonRaisingPlain()
self.assertEqual(num, 1)
def testMarked(self):
num = non_raising.MakeTestNonRaisingMarked()
self.assertEqual(num, -1)
if __name__ == '__main__':
absltest.main()
| 28.787879 | 74 | 0.753684 |
f71b00645a1360df4f8b7496608b98342bb43f7f | 8,243 | py | Python | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | null | null | null | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | caltech-netlab/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 3 | 2021-04-28T14:43:32.000Z | 2021-04-28T14:58:04.000Z | gym_acnportal/gym_acnsim/envs/tests/test_action_spaces.py | sunash/gym-acnportal | cacd2e4aa9159a3bf7f0b8e3db2dbb0832d76e46 | [
"BSD-3-Clause"
] | 1 | 2020-05-12T19:13:51.000Z | 2020-05-12T19:13:51.000Z | # coding=utf-8
""" Tests for SimAction and action space functions. """
import unittest
from typing import Callable, Dict, List, Any
from unittest.mock import create_autospec
import numpy as np
from gym import Space
from ..action_spaces import (
SimAction,
single_charging_schedule,
zero_centered_single_charging_schedule,
)
from ...interfaces import GymTrainedInterface
class TestSimAction(unittest.TestCase):
# noinspection PyMissingOrEmptyDocstring
@classmethod
def setUpClass(cls) -> None:
# The type here is Any as space_function is actually a Mock
# object, but there's no Mock type in the typing library.
cls.space_function: Any = create_autospec(lambda interface: Space())
cls.to_schedule: Callable[
[GymTrainedInterface, np.ndarray], Dict[str, List[float]]
] = lambda interface, array: {"a": [0]}
cls.name: str = "stub_action"
cls.sim_action: SimAction = SimAction(
cls.space_function, cls.to_schedule, cls.name
)
cls.interface: GymTrainedInterface = create_autospec(GymTrainedInterface)
def test_correct_on_init_sim_action_name(self) -> None:
self.assertEqual(self.sim_action.name, self.name)
def test_get_space(self) -> None:
self.sim_action.get_space(self.interface)
self.space_function.assert_called_once()
def test_get_schedule(self) -> None:
array: np.ndarray = np.array([[1, 0], [0, 1]])
self.assertEqual(
self.sim_action.get_schedule(self.interface, array), {"a": [0]}
)
class TestSingleChargingSchedule(unittest.TestCase):
# Some class variables are defined outside of setUpClass so that
# the code inspector knows that inherited classes have these
# attributes.
max_rate: float = 16.0
min_rate: float = 0.0
negative_rate: float = -4.0
deadband_rate: float = 6.0
# noinspection PyMissingOrEmptyDocstring
@classmethod
def setUpClass(cls) -> None:
cls.sim_action: SimAction = single_charging_schedule()
cls.station_ids: List[str] = ["T1", "T2"]
cls.offset: float = 0.5
def _interface_builder(interface: Any, min_rate: float) -> Any:
interface.station_ids = cls.station_ids
interface.max_pilot_signal = lambda station_id: cls.max_rate
interface.min_pilot_signal = lambda station_id: (
min_rate if station_id == cls.station_ids[1] else cls.min_rate
)
return interface
cls.interface: Any = _interface_builder(
create_autospec(GymTrainedInterface), cls.min_rate
)
cls.interface_negative_min: Any = _interface_builder(
create_autospec(GymTrainedInterface), cls.negative_rate
)
cls.interface_deadband_min: Any = _interface_builder(
create_autospec(GymTrainedInterface), cls.deadband_rate
)
def test_correct_on_init_single_name(self) -> None:
self.assertEqual(self.sim_action.name, "single schedule")
def _test_space_function_helper(
self, interface: GymTrainedInterface, min_rate: float, max_rate: float
) -> None:
out_space: Space = self.sim_action.get_space(interface)
self.assertEqual(out_space.shape, (len(self.station_ids),))
np.testing.assert_equal(out_space.low, 2 * [min_rate])
np.testing.assert_equal(out_space.high, 2 * [max_rate])
self.assertEqual(out_space.dtype, "float")
def test_single_space_function(self) -> None:
self._test_space_function_helper(self.interface, self.min_rate, self.max_rate)
def test_single_space_function_negative_min(self) -> None:
self._test_space_function_helper(
self.interface_negative_min, self.negative_rate, self.max_rate
)
def test_single_space_function_deadband_min(self) -> None:
self._test_space_function_helper(
self.interface_deadband_min, self.min_rate, self.max_rate
)
def test_single_to_schedule(self) -> None:
good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array(
[self.min_rate + self.offset, (self.max_rate - self.min_rate) / 2]
),
)
self.assertEqual(
good_schedule,
{
self.station_ids[0]: [self.min_rate + self.offset],
self.station_ids[1]: [(self.max_rate - self.min_rate) / 2],
},
)
def test_single_to_bad_schedule(self) -> None:
# The get_schedule function does not test if the input schedule
# array is within the action space.
bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
)
self.assertEqual(
bad_schedule,
{
self.station_ids[0]: [self.min_rate - self.offset],
self.station_ids[1]: [self.max_rate + self.offset],
},
)
def test_single_error_schedule(self) -> None:
with self.assertRaises(TypeError):
_ = self.sim_action.get_schedule(
self.interface,
np.array(
[[self.min_rate - self.offset], [self.max_rate + self.offset]]
),
)
class TestZeroCenteredSingleChargingSchedule(TestSingleChargingSchedule):
# noinspection PyMissingOrEmptyDocstring
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.sim_action: SimAction = zero_centered_single_charging_schedule()
cls.shifted_max = cls.max_rate - (cls.max_rate + cls.min_rate) / 2
cls.shifted_minimums = [
cls.min_rate - (cls.max_rate + cls.min_rate) / 2,
cls.negative_rate - (cls.max_rate + cls.negative_rate) / 2,
cls.min_rate - (cls.max_rate + cls.deadband_rate) / 2,
]
cls.negative_max_shift = cls.max_rate - (cls.max_rate + cls.negative_rate) / 2
def test_correct_on_init_single_name(self) -> None:
self.assertEqual(self.sim_action.name, "zero-centered single schedule")
def test_single_space_function(self) -> None:
self._test_space_function_helper(
self.interface, self.shifted_minimums[0], self.shifted_max
)
def test_single_space_function_negative_min(self) -> None:
self._test_space_function_helper(
self.interface_negative_min,
self.shifted_minimums[1],
self.negative_max_shift,
)
def test_single_space_function_deadband_min(self) -> None:
self._test_space_function_helper(
self.interface_deadband_min, self.shifted_minimums[2], self.shifted_max
)
def test_single_to_bad_schedule(self) -> None:
# The get_schedule function does not test if the input schedule
# array is within the action space.
bad_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array([self.min_rate - self.offset, self.max_rate + self.offset]),
)
self.assertEqual(
bad_schedule,
{
self.station_ids[0]: [
self.min_rate - self.offset + (self.max_rate + self.min_rate) / 2
],
self.station_ids[1]: [
self.max_rate + self.offset + (self.max_rate + self.min_rate) / 2
],
},
)
def test_single_to_schedule(self) -> None:
good_schedule: Dict[str, List[float]] = self.sim_action.get_schedule(
self.interface,
np.array(
[
self.min_rate - (self.max_rate + self.min_rate) / 2,
self.max_rate - (self.max_rate + self.min_rate) / 2,
]
),
)
self.assertEqual(
good_schedule,
{
self.station_ids[0]: [self.min_rate],
self.station_ids[1]: [self.max_rate],
},
)
if __name__ == "__main__":
unittest.main()
| 37.298643 | 86 | 0.626592 |
f71b2aad75e30594e61025ad33be2a2c17932235 | 2,792 | py | Python | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/-reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | 1 | 2022-03-07T12:18:00.000Z | 2022-03-07T12:18:00.000Z | reinvent_models/link_invent/networks/encoder_decoder.py | GT4SD/reinvent_models | e1cf00d1b24fe5f39354e34829adc25460da84e2 | [
"MIT"
] | null | null | null | """
Implementation of a network using an Encoder-Decoder architecture.
"""
import torch.nn as tnn
from torch import Tensor
from reinvent_models.link_invent.networks.decoder import Decoder
from reinvent_models.link_invent.networks.encoder import Encoder
class EncoderDecoder(tnn.Module):
"""
An encoder-decoder that combines input with generated targets.
"""
def __init__(self, encoder_params: dict, decoder_params: dict):
super(EncoderDecoder, self).__init__()
self._encoder = Encoder(**encoder_params)
self._decoder = Decoder(**decoder_params)
def forward(self, encoder_seqs: Tensor, encoder_seq_lengths: Tensor, decoder_seqs: Tensor,
decoder_seq_lengths: Tensor):
"""
Performs the forward pass.
        :param encoder_seqs: A tensor with the input sequences to the encoder (batch, seq_e, dim).
        :param encoder_seq_lengths: A list with the length of each encoder input sequence.
        :param decoder_seqs: A tensor with the target sequences fed to the decoder (batch, seq_d, dim).
:param decoder_seq_lengths: The lengths of the decoder sequences.
:return : The output logits as a tensor (batch, seq_d, dim).
"""
encoder_padded_seqs, hidden_states = self.forward_encoder(encoder_seqs, encoder_seq_lengths)
logits, _, _ = self.forward_decoder(decoder_seqs, decoder_seq_lengths, encoder_padded_seqs, hidden_states)
return logits
def forward_encoder(self, padded_seqs: Tensor, seq_lengths: Tensor):
"""
Does a forward pass only of the encoder.
:param padded_seqs: The data to feed the encoder.
:param seq_lengths: The length of each sequence in the batch.
:return : Returns a tuple with (encoded_seqs, hidden_states)
"""
return self._encoder(padded_seqs, seq_lengths)
def forward_decoder(self, padded_seqs: Tensor, seq_lengths: Tensor, encoder_padded_seqs: Tensor,
hidden_states: Tensor):
"""
Does a forward pass only of the decoder.
:param hidden_states: The hidden states from the encoder.
:param padded_seqs: The data to feed to the decoder.
:param seq_lengths: The length of each sequence in the batch.
:return : Returns the logits and the hidden state for each element of the sequence passed.
"""
return self._decoder(padded_seqs, seq_lengths, encoder_padded_seqs, hidden_states)
def get_params(self):
"""
Obtains the params for the network.
:return : A dict with the params.
"""
return {
"encoder_params": self._encoder.get_params(),
"decoder_params": self._decoder.get_params()
}
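# Illustrative usage sketch (not part of the original file). The exact keys of
# encoder_params/decoder_params depend on the Encoder/Decoder constructors, which
# are not shown here, so they are left as placeholders:
#
#   model = EncoderDecoder(encoder_params={...}, decoder_params={...})
#   logits = model(encoder_seqs, encoder_seq_lengths, decoder_seqs, decoder_seq_lengths)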
| 42.30303 | 115 | 0.666189 |
f71b2b58505f1a73cc43c49801a8cae13c3f8a26 | 43 | py | Python | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2018-04-02T15:38:10.000Z | 2018-04-02T15:38:10.000Z | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | null | null | null | src/Application/PythonScriptModule/proto/state_2.py | antont/tundra | 5c9b0a3957071f08ab425dff701cdbb34f9e1868 | [
"Apache-2.0"
] | 1 | 2021-09-04T12:37:34.000Z | 2021-09-04T12:37:34.000Z | import state
def change():
state.x = 2 | 10.75 | 15 | 0.627907 |
f71b2ed9253b60e916abe7efa50cc6715f2d213c | 2,044 | py | Python | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 3 | 2020-02-12T01:24:46.000Z | 2020-02-13T00:50:46.000Z | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 32 | 2020-02-20T10:20:56.000Z | 2022-02-10T01:42:46.000Z | test/crawler/testICrawler.py | AutoDash/AutoDash | 3924795a04159f80ea3b65b2172747babd15f35f | [
"Apache-2.0"
] | 1 | 2020-02-22T02:47:19.000Z | 2020-02-22T02:47:19.000Z | #!/usr/bin/env python3
import unittest
from src.crawler.iCrawler import iCrawler, UndefinedDatabaseException
from src.data.MetaDataItem import MetaDataItem
from test.mock.MockDataAccessor import MockDataAccessor
class MockCrawler(iCrawler):
def __init__(self):
super().__init__()
def next_downloadable(self):
return MetaDataItem(
title="title",
url="fake url 1",
download_src="youtube")
class TestICrawler(unittest.TestCase):
def setUp(self):
self.crawler = MockCrawler()
self.database = MockDataAccessor()
def test_compiles(self):
self.assertEqual(True, True)
def test_no_database(self):
metadata = self.crawler.next_downloadable()
try:
self.crawler.check_new_url(metadata.url)
self.assertTrue(False)
except UndefinedDatabaseException:
# Expected error
pass
def test_check_new_url(self):
self.crawler.set_database(self.database)
metadata = self.crawler.next_downloadable()
self.assertTrue(self.crawler.check_new_url(metadata.url))
self.database.publish_new_metadata(metadata)
self.assertFalse(self.crawler.check_new_url(metadata.url))
def test_run(self):
self.crawler.set_database(self.database)
metadata = self.crawler.run({})
self.database.publish_new_metadata(metadata)
id_list = self.database.fetch_video_id_list()
self.assertTrue(len(id_list) == 1)
metadata = self.database.fetch_metadata(id_list[0])
# Get exact copy of the metadata item that was published
copy_metadata = self.crawler.next_downloadable()
        # need to do this because the times can be off
        copy_metadata.date_created = metadata.date_created
        copy_metadata.id = metadata.id  # need to do this because otherwise we can't compare
self.assertEqual(metadata.to_json(), copy_metadata.to_json())
if __name__ == '__main__':
unittest.main()
| 29.623188 | 86 | 0.675147 |
f71b315d6312d73a4f7581bd22785f23c8cb7785 | 5,935 | py | Python | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 1 | 2017-07-31T07:07:32.000Z | 2017-07-31T07:07:32.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | dstoup/kwiver | a3a36317b446baf0feb6274235ab1ac6b4329ead | [
"BSD-3-Clause"
] | 3 | 2021-03-19T15:39:43.000Z | 2021-09-08T02:47:15.000Z | sprokit/tests/bindings/python/sprokit/pipeline/test-scheduler_registry.py | acidburn0zzz/kwiver | 6e4205f1c46df04759c57c040f01cc804b27e00d | [
"BSD-3-Clause"
] | null | null | null | #!@PYTHON_EXECUTABLE@
#ckwg +28
# Copyright 2011-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
try:
from sprokit.pipeline import config
import sprokit.pipeline.scheduler_factory
except:
test_error("Failed to import the scheduler_factory module")
def test_create():
from sprokit.pipeline import config
from sprokit.pipeline import scheduler_factory
scheduler_factory.SchedulerType()
## scheduler_factory.SchedulerTypes()
scheduler_factory.SchedulerDescription()
scheduler_factory.SchedulerModule()
def test_api_calls():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import scheduler_factory
modules.load_known_modules()
sched_type = 'thread_per_process'
c = config.empty_config()
p = pipeline.Pipeline()
scheduler_factory.create_scheduler(sched_type, p)
scheduler_factory.create_scheduler(sched_type, p, c)
scheduler_factory.types()
scheduler_factory.description(sched_type)
scheduler_factory.default_type
def example_scheduler(check_init):
from sprokit.pipeline import scheduler
class PythonExample(scheduler.PythonScheduler):
def __init__(self, pipe, conf):
scheduler.PythonScheduler.__init__(self, pipe, conf)
self.ran_start = check_init
self.ran_wait = check_init
self.ran_stop = check_init
self.ran_pause = check_init
self.ran_resume = check_init
def _start(self):
self.ran_start = True
def _wait(self):
self.ran_wait = True
def _stop(self):
self.ran_stop = True
def _pause(self):
self.ran_pause = True
def _resume(self):
self.ran_resume = True
def __del__(self):
if not self.ran_start:
test_error("start override was not called")
if not self.ran_wait:
test_error("wait override was not called")
if not self.ran_stop:
test_error("stop override was not called")
if not self.ran_pause:
test_error("pause override was not called")
if not self.ran_resume:
test_error("resume override was not called")
return PythonExample
def test_register():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import scheduler_factory
modules.load_known_modules()
sched_type = 'python_example'
sched_desc = 'simple description'
scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(True))
if not sched_desc == scheduler_factory.description(sched_type):
test_error("Description was not preserved when registering")
p = pipeline.Pipeline()
try:
s = scheduler_factory.create_scheduler(sched_type, p)
if s is None:
raise Exception()
except:
test_error("Could not create newly registered scheduler type")
def test_wrapper_api():
from sprokit.pipeline import config
from sprokit.pipeline import modules
from sprokit.pipeline import pipeline
from sprokit.pipeline import process_factory
from sprokit.pipeline import scheduler_factory
sched_type = 'python_example'
sched_desc = 'simple description'
modules.load_known_modules()
scheduler_factory.add_scheduler(sched_type, sched_desc, example_scheduler(False))
p = pipeline.Pipeline()
proc_type = 'orphan'
proc_name = 'orphan'
proc = process_factory.create_process(proc_type, proc_name)
p.add_process(proc)
def check_scheduler(s):
if s is None:
test_error("Got a 'None' scheduler")
return
s.start()
s.pause()
s.resume()
s.stop()
s.start()
s.wait()
del s
p.reset()
p.setup_pipeline()
s = scheduler_factory.create_scheduler(sched_type, p)
check_scheduler(s)
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 4:
test_error("Expected three arguments")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
from sprokit.test.test import *
run_test(testname, find_tests(locals()))
| 29.824121 | 85 | 0.694356 |
f71b4bb600bb418ed1ef7e86a5615b6ad8bfabf3 | 2,753 | py | Python | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | 2 | 2020-07-24T19:26:51.000Z | 2021-08-21T21:04:11.000Z | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | null | null | null | python/dgl/nn/mxnet/conv/agnnconv.py | jinghuix/dgl | fae26dd15caac92458a08ad34889086e1e333ddd | [
"Apache-2.0"
] | 1 | 2021-03-09T12:42:46.000Z | 2021-03-09T12:42:46.000Z | """MXNet Module for Attention-based Graph Neural Network layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import mxnet as mx
from mxnet.gluon import nn
from .... import function as fn
from ..softmax import edge_softmax
from ..utils import normalize
from ....utils import expand_as_pair
class AGNNConv(nn.Block):
r"""Attention-based Graph Neural Network layer from paper `Attention-based
Graph Neural Network for Semi-Supervised Learning
<https://arxiv.org/abs/1803.03735>`__.
.. math::
H^{l+1} = P H^{l}
where :math:`P` is computed as:
.. math::
P_{ij} = \mathrm{softmax}_i ( \beta \cdot \cos(h_i^l, h_j^l))
Parameters
----------
init_beta : float, optional
The :math:`\beta` in the formula.
learn_beta : bool, optional
If True, :math:`\beta` will be learnable parameter.
"""
def __init__(self,
init_beta=1.,
learn_beta=True):
super(AGNNConv, self).__init__()
with self.name_scope():
self.beta = self.params.get('beta',
shape=(1,),
grad_req='write' if learn_beta else 'null',
init=mx.init.Constant(init_beta))
def forward(self, graph, feat):
r"""Compute AGNN Layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : mxnet.NDArray
The input feature of shape :math:`(N, *)` :math:`N` is the
number of nodes, and :math:`*` could be of any shape.
            If a pair of mxnet.NDArray is given, the pair must contain two tensors of shape
            :math:`(N_{in}, *)` and :math:`(N_{out}, *)`, and the :math:`*` in the latter
            tensor must equal that of the former.
Returns
-------
mxnet.NDArray
The output feature of shape :math:`(N, *)` where :math:`*`
should be the same as input shape.
"""
with graph.local_scope():
feat_src, feat_dst = expand_as_pair(feat, graph)
graph.srcdata['h'] = feat_src
graph.srcdata['norm_h'] = normalize(feat_src, p=2, axis=-1)
if isinstance(feat, tuple) or graph.is_block:
graph.dstdata['norm_h'] = normalize(feat_dst, p=2, axis=-1)
# compute cosine distance
graph.apply_edges(fn.u_dot_v('norm_h', 'norm_h', 'cos'))
cos = graph.edata.pop('cos')
e = self.beta.data(feat_src.context) * cos
graph.edata['p'] = edge_softmax(graph, e)
graph.update_all(fn.u_mul_e('h', 'p', 'm'), fn.sum('m', 'h'))
return graph.dstdata.pop('h')
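# Illustrative usage sketch (not part of the original file); it assumes DGL is built
# with the MXNet backend and uses a small random graph purely for shape checking:
#
#   import dgl
#   import mxnet as mx
#   g = dgl.rand_graph(5, 10)                   # 5 nodes, 10 edges
#   feat = mx.nd.random.uniform(shape=(5, 16))
#   conv = AGNNConv(init_beta=1.0, learn_beta=True)
#   conv.initialize()                           # Gluon blocks need explicit init
#   out = conv(g, feat)                         # same shape as the input: (5, 16)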
| 36.706667 | 91 | 0.553578 |
f71b5dd3b2f1f6ba21eafc9f59670a50d9efc222 | 207 | py | Python | sciencer/expanders/__init__.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | [
"MIT"
] | 2 | 2022-03-28T17:27:21.000Z | 2022-03-29T22:27:15.000Z | sciencer/expanders/__init__.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | [
"MIT"
] | null | null | null | sciencer/expanders/__init__.py | SciencerIO/sciencer-toolkit | f17c4a5dfb6cc5dbabefe03b13eb1e5345f7b1b9 | [
"MIT"
] | 1 | 2022-03-28T14:47:53.000Z | 2022-03-28T14:47:53.000Z | """Sciencer Expanders"""
from .expander import Expander
from .expand_by_authors import ExpandByAuthors
from .expand_by_references import ExpandByReferences
from .expand_by_citations import ExpandByCitations
| 34.5 | 52 | 0.864734 |
f71b632bb314545ed7732ce47684f88d027b19e7 | 78 | py | Python | xpring/proto/__init__.py | mvadari/xpring-py | b837420127d1c1e5051ed305ed4f19fe9910a4f6 | [
"0BSD"
] | 6 | 2019-12-11T00:54:56.000Z | 2021-03-11T19:44:44.000Z | xpring/proto/__init__.py | mvadari/xpring-py | b837420127d1c1e5051ed305ed4f19fe9910a4f6 | [
"0BSD"
] | null | null | null | xpring/proto/__init__.py | mvadari/xpring-py | b837420127d1c1e5051ed305ed4f19fe9910a4f6 | [
"0BSD"
] | 9 | 2020-02-28T18:40:46.000Z | 2022-02-28T23:01:09.000Z | # The rest of this package, but not this __init__.py, is generated by protoc.
| 39 | 77 | 0.75641 |
f71b65b3b003148f57d2ed310d5f76f0d067c474 | 933 | py | Python | violas_client/canoser/bool_t.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | null | null | null | violas_client/canoser/bool_t.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | null | null | null | violas_client/canoser/bool_t.py | violas-core/violas-client | e8798f7d081ac218b78b81fd7eb2f8da92631a16 | [
"MIT"
] | 1 | 2022-01-05T06:49:42.000Z | 2022-01-05T06:49:42.000Z | from violas_client.canoser.base import Base
class BoolT(Base):
@classmethod
def encode(self, value):
if value:
return b'\1'
else:
return b'\0'
@classmethod
def decode_bytes(self, value):
if value == b'\0':
return False
elif value == b'\1':
return True
else:
raise TypeError("bool should be 0 or 1.")
@classmethod
def decode(self, cursor):
value = cursor.read_bytes(1)
return self.decode_bytes(value)
@classmethod
def from_value(cls, value):
if value:
return True
return False
@classmethod
def check_value(self, value):
if not isinstance(value, bool):
raise TypeError('value {} is not bool'.format(value))
@classmethod
def to_json_serializable(cls, value):
return value
| 23.923077 | 66 | 0.543408 |
f71b9e37908dd5da30752301903bfc85504aa496 | 728 | py | Python | Examples/AcceptAllRevisions.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 14 | 2018-07-15T17:01:52.000Z | 2018-11-29T06:15:33.000Z | Examples/AcceptAllRevisions.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 1 | 2018-09-28T12:59:34.000Z | 2019-10-08T08:42:59.000Z | Examples/AcceptAllRevisions.py | aspose-words-cloud/aspose-words-cloud-python | 65c7b55fa4aac69b60d41e7f54aed231df285479 | [
"MIT"
] | 2 | 2020-12-21T07:59:17.000Z | 2022-02-16T21:41:25.000Z | import os
import asposewordscloud
import asposewordscloud.models.requests
from asposewordscloud.rest import ApiException
from shutil import copyfile
words_api = asposewordscloud.WordsApi(client_id = '####-####-####-####-####', client_secret = '##################')
file_name = 'test_doc.docx'
# Upload original document to cloud storage.
my_var1 = open(file_name, 'rb')
my_var2 = file_name
upload_file_request = asposewordscloud.models.requests.UploadFileRequest(file_content=my_var1, path=my_var2)
words_api.upload_file(upload_file_request)
# Calls AcceptAllRevisions method for document in cloud.
my_var3 = file_name
request = asposewordscloud.models.requests.AcceptAllRevisionsRequest(name=my_var3)
words_api.accept_all_revisions(request) | 38.315789 | 108 | 0.787088 |
f71b9fe025bede004ce7e9ec58b828318f04188f | 15,536 | py | Python | src/api/block.py | BluCodeGH/Amulet-Map-Editor | 7d1d2243fc29095b3cffe8aa4979235444ba6738 | [
"MIT"
] | 1 | 2020-08-26T22:42:16.000Z | 2020-08-26T22:42:16.000Z | src/api/block.py | BluCodeGH/Amulet-Map-Editor | 7d1d2243fc29095b3cffe8aa4979235444ba6738 | [
"MIT"
] | null | null | null | src/api/block.py | BluCodeGH/Amulet-Map-Editor | 7d1d2243fc29095b3cffe8aa4979235444ba6738 | [
"MIT"
] | null | null | null | from __future__ import annotations
import copy
from sys import getsizeof
import re
from typing import Dict, Iterable, List, Tuple, Union, overload
from api.errors import InvalidBlockException
from utils import Int
class Block:
"""
Class to handle data about various blockstates and allow for extra blocks to be created and interacted with.
.. important::
        Creating version-specific block objects via the `Block()` constructor instead of using
:meth:`api.world.World.get_block_instance` is supported but not encouraged. To avoid possible caveats of doing this,
make sure to either only instantiate blocks with Amulet blockstate data or use
        :meth:`api.world.World.get_block_instance` instead.
    Here are a few examples of how to create a Block object with extra blocks:
    Creating a new Block object with a base of ``stone`` and an extra block of ``water[level=1]``:
>>> stone = Block(blockstate="minecraft:stone")
>>> water_level_1 = Block(blockstate="minecraft:water[level=1]")
>>> stone_with_extra_block = stone + water_level_1
>>> repr(stone_with_extra_block)
'Block(minecraft:stone, minecraft:water[level=1])'
Creating a new Block object using the namespace and base_name:
>>> granite = Block(namespace="minecraft", base_name="granite")
Creating a new Block object with another layer of extra blocks:
>>> stone_water_granite = stone_with_extra_block + granite # Doesn't modify any of the other objects
>>> repr(stone_water_granite)
'Block(minecraft:stone, minecraft:water[level=1], minecraft:granite)'
Creating a new Block object by removing an extra block from all layers:
*Note: This removes all instances of the Block object from extra blocks*
>>> stone_granite = stone_water_granite - water_level_1 # Doesn't modify any of the other objects either
>>> repr(stone_granite)
'Block(minecraft:stone, minecraft:granite)'
Creating a new Block object by removing a specific layer:
>>> oak_log_axis_x = Block(blockstate="minecraft:oak_log[axis=x]")
>>> stone_water_granite_water_oak_log = stone_water_granite + water_level_1 + oak_log_axis_x
>>> repr(stone_water_granite_water_oak_log)
'Block(minecraft:stone, minecraft:water[level=1], minecraft:granite, minecraft:water[level=1], minecraft:oak_log[axis=x])'
>>> stone_granite_water_oak_log = stone_water_granite_water_oak_log.remove_layer(0)
>>> repr(stone_granite_water_oak_log)
'Block(minecraft:stone, minecraft:granite, minecraft:water[level=1], minecraft:oak_log[axis=x])'
"""
__slots__ = (
"_namespace",
"_base_name",
"_properties",
"_extra_blocks",
"_blockstate",
) # Reduces memory footprint
blockstate_regex = re.compile(
r"(?:(?P<namespace>[a-z0-9_.-]+):)?(?P<base_name>[a-z0-9/._-]+)(?:\[(?P<property_name>[a-z0-9_]+)=(?P<property_value>[a-z0-9_]+)(?P<properties>.*)\])?"
)
parameters_regex = re.compile(r"(?:,(?P<name>[a-z0-9_]+)=(?P<value>[a-z0-9_]+))")
def __init__(
self,
blockstate: str = None,
namespace: str = None,
base_name: str = None,
properties: Dict[str, Union[str, bool, int]] = None,
extra_blocks: Union[Block, Iterable[Block]] = None,
):
self._blockstate = blockstate
self._namespace = namespace
self._base_name = base_name
if namespace is not None and base_name is not None and properties is None:
properties = {}
self._properties = properties
self._extra_blocks = ()
if extra_blocks:
if isinstance(extra_blocks, Block):
extra_blocks = [extra_blocks]
self._extra_blocks = tuple(extra_blocks)
if blockstate:
self._gen_blockstate()
@property
def namespace(self) -> str:
"""
The namespace of the blockstate represented by the Block object (IE: `minecraft`)
:return: The namespace of the blockstate
"""
if self._namespace is None:
self._parse_blockstate_string()
return self._namespace
@property
def base_name(self) -> str:
"""
The base name of the blockstate represented by the Block object (IE: `stone`, `dirt`)
:return: The base name of the blockstate
"""
if self._base_name is None:
self._parse_blockstate_string()
return self._base_name
@property
def properties(self) -> Dict[str, Union[str, bool, int]]:
"""
The mapping of properties of the blockstate represented by the Block object (IE: `{"level": "1"}`)
:return: A dictionary of the properties of the blockstate
"""
if self._properties is None:
self._parse_blockstate_string()
return copy.deepcopy(self._properties)
@property
def blockstate(self) -> str:
"""
The full blockstate string of the blockstate represented by the Block object (IE: `minecraft:stone`, `minecraft:oak_log[axis=x]`)
:return: The blockstate string
"""
if self._blockstate is None:
self._gen_blockstate()
return self._blockstate
@property
def extra_blocks(self) -> Union[Tuple, Tuple[Block]]:
"""
Returns a tuple of the extra blocks contained in the Block instance
:return: A tuple of Block objects
"""
return self._extra_blocks
def _gen_blockstate(self):
self._blockstate = f"{self.namespace}:{self.base_name}"
if self.properties:
props = [f"{key}={value}" for key, value in sorted(self.properties.items())]
self._blockstate = f"{self._blockstate}[{','.join(props)}]"
@staticmethod
def parse_blockstate_string(blockstate: str) -> Tuple[str, str, Dict[str, str]]:
match = Block.blockstate_regex.match(blockstate)
namespace = match.group("namespace") or "minecraft"
base_name = match.group("base_name")
if match.group("property_name") is not None:
properties = {match.group("property_name"): match.group("property_value")}
else:
properties = {}
properties_string = match.group("properties")
if properties_string is not None:
properties_match = Block.parameters_regex.finditer(properties_string)
for match in properties_match:
properties[match.group("name")] = match.group("value")
return namespace, base_name, {k: v for k, v in sorted(properties.items())}
def _parse_blockstate_string(self):
self._namespace, self._base_name, self._properties = self.parse_blockstate_string(
self._blockstate
)
def __str__(self) -> str:
"""
:return: The base blockstate string of the Block object
"""
return self.blockstate
def __repr__(self) -> str:
"""
:return: The base blockstate string of the Block object along with the blockstate strings of included extra blocks
"""
return f"Block({', '.join([str(b) for b in (self, *self.extra_blocks)])})"
def __len__(self):
return len(self._extra_blocks) + 1
def _compare_extra_blocks(self, other: Block) -> bool:
if len(self.extra_blocks) != len(other.extra_blocks):
return False
if len(self.extra_blocks) == 0:
return True
for our_extra_block, their_extra_block in zip(
self.extra_blocks, other.extra_blocks
):
if our_extra_block != their_extra_block:
return False
return True
def __eq__(self, other: Block) -> bool:
"""
Checks the equality of this Block object to another Block object
:param other: The Block object to check against
:return: True if the Blocks objects are equal, False otherwise
"""
if self.__class__ != other.__class__:
return False
return self.blockstate == other.blockstate and self._compare_extra_blocks(other)
def __hash__(self) -> int:
"""
Hashes the Block object
:return: A hash of the Block object
"""
current_hash = hash(self.blockstate)
if self.extra_blocks:
current_hash = current_hash + hash(self.extra_blocks)
return current_hash
def __add__(self, other: Block) -> Block:
"""
Allows for other Block objects to be added to this Block object's ``extra_blocks``
:param other: The Block object to add to the end of this Block object's `extra_blocks`
:return: A new Block object with the same data but with an additional Block at the end of ``extra_blocks``
"""
if not isinstance(other, Block):
return NotImplemented
if (
len(other.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_cpy = other
else:
other_cpy = Block(
namespace=other.namespace,
base_name=other.base_name,
properties=other.properties,
)
other_extras = []
for eb in other.extra_blocks:
if (
len(eb.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_extras.append(eb)
else:
other_extras.append(
Block(
namespace=eb.namespace,
base_name=eb.base_name,
properties=eb.properties,
)
)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=[*self.extra_blocks, other_cpy, *other_extras],
)
def __sub__(self, other: Block) -> Block:
"""
Allows for other Block objects to be subtracted from this Block object's ``extra_blocks``
:param other: The Block object to subtract from this Block objects' ``extra_blocks``
:return: A new Block object without any instances of the subtracted block in ``extra_blocks``
"""
if not isinstance(other, Block):
return NotImplemented
if (
len(other.extra_blocks) == 0
): # Reduces the amount of extra objects/references created
other_cpy = other
else:
other_cpy = Block(
namespace=other.namespace,
base_name=other.base_name,
properties=other.properties,
)
other_extras = []
for eb in other.extra_blocks:
if len(eb.extra_blocks) == 0:
other_extras.append(eb)
else:
other_extras.append(
Block(
namespace=eb.namespace,
base_name=eb.base_name,
properties=eb.properties,
)
)
# Sets are unordered, so a regular set subtraction doesn't always return the order we want (it sometimes will!)
# So we loop through all of our extra blocks and only append those to the new_extras list if they aren't in
# extra_blocks_to_remove
new_extras = []
extra_blocks_to_remove = (other_cpy, *other_extras)
for eb in self.extra_blocks:
if eb not in extra_blocks_to_remove:
new_extras.append(eb)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=new_extras,
)
def remove_layer(self, layer: int) -> Block:
"""
Removes the Block object from the specified layer and returns the resulting new Block object
:param layer: The layer of extra block to remove
:return: A new instance of Block with the same data but with the extra block at specified layer removed
:raises `InvalidBlockException`: Raised when you remove the base block from a Block with no other extra blocks
"""
if (
layer == 0
and len(self.extra_blocks) > 0
and layer <= len(self.extra_blocks)
):
new_base = self._extra_blocks[0]
return Block(
namespace=new_base.namespace,
base_name=new_base.base_name,
properties=new_base.properties,
extra_blocks=[*self._extra_blocks[1:]],
)
elif layer > len(self.extra_blocks):
raise InvalidBlockException("You cannot remove a non-existant layer")
elif layer == 0:
raise InvalidBlockException(
"Removing the base block with no extra blocks is not supported"
)
return Block(
namespace=self.namespace,
base_name=self.base_name,
properties=self.properties,
extra_blocks=[*self.extra_blocks[: layer - 1], *self.extra_blocks[layer:]],
)
def __sizeof__(self):
size = (
getsizeof(self.namespace)
+ getsizeof(self.base_name)
+ getsizeof(self.properties)
+ getsizeof(self.blockstate)
)
for eb in self.extra_blocks:
size += getsizeof(eb)
return size
class BlockManager:
"""
Class to handle the mappings between Block objects and their index-based internal IDs
"""
def __init__(self):
"""
Creates a new BlockManager object
"""
self._index_to_block: List[Block] = []
self._block_to_index_map: Dict[Block, int] = {}
def __len__(self):
return len(self._index_to_block)
def __contains__(self, item: Block) -> bool:
return item in self._block_to_index_map
@overload
def __getitem__(self, item: Block) -> int:
...
@overload
def __getitem__(self, item: Int) -> Block:
...
def __getitem__(self, item):
"""
If a Block object is passed to this function, it'll return the internal ID/index of the
blockstate. If an int is given, this method will return the Block object at that specified index.
:param item: The Block object or int to get the mapping data of
:return: An int if a Block object was supplied, a Block object if an int was supplied
"""
try:
if isinstance(item, Block):
return self._block_to_index_map[item]
return self._index_to_block[item]
except (KeyError, IndexError):
raise KeyError(
f"There is no {item} in the BlockManager. "
f"You might want to use the `add_block` function for your blocks before accessing them."
)
def get_add_block(self, block: Block) -> int:
"""
Adds a Block object to the internal Block object/ID mappings. If the Block already exists in the mappings,
then the existing ID is returned
:param block: The Block to add to the manager
:return: The internal ID of the Block
"""
if block in self._block_to_index_map:
return self._block_to_index_map[block]
self._block_to_index_map[block] = i = len(self._block_to_index_map)
self._index_to_block.append(block)
return i
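# Illustrative usage sketch (not part of the original file), based on the API above:
#
#   manager = BlockManager()
#   stone = Block(blockstate="minecraft:stone")
#   index = manager.get_add_block(stone)    # registers the block and returns its internal ID
#   assert manager[stone] == index          # Block -> internal ID
#   assert manager[index] == stone          # internal ID -> Block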
| 35.149321 | 159 | 0.613092 |
f71bc619e7d4702a0d959637f7bce8d52e79debf | 3,636 | py | Python | LaserCommandConstants.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | LaserCommandConstants.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | LaserCommandConstants.py | jaredly/meerk40t | 446427e29104cb89fd2ee17ad824fc801d44afe0 | [
"MIT"
] | null | null | null | """
Laser Commands are a middle language of commands for spooling and interpreting.
NOTE: Never use the integer value, only the command name. The integer values are
permitted to change.
COMMAND_PLOT: takes a plot object to generate simple plot commands.
COMMAND_RASTER: takes a raster plot object which generates simple raster commands.
Simple plot values are x, y, on, where x and y are the position in absolute values and on is whether the laser fires
for that particular move command. The plot is expected to use svgelements code, passed to zinglplotter code.
The raster is expected to use RasterBuilder, which should be able to plot any raster in any fashion.
A COMMAND_RESUME would have to be issued in realtime since in a paused state the commands are not processed.
"""
COMMAND_LASER_OFF = 1 # Turns laser off
COMMAND_LASER_ON = 2 # Turns laser on
COMMAND_LASER_DISABLE = 5 # Disables the laser
COMMAND_LASER_ENABLE = 6 # Enables the laser
COMMAND_MOVE = 10 # Performs a line move
COMMAND_CUT = 11 # Performs a line cut.
COMMAND_WAIT = 20 # Pauses the given time in seconds. (floats accepted).
COMMAND_WAIT_FINISH = 21 # WAIT until the buffer is finished.
COMMAND_MODE_RAPID = 50
COMMAND_MODE_PROGRAM = 51
COMMAND_MODE_FINISHED = 52
COMMAND_PLOT = 100 # Takes a plot object
COMMAND_RASTER = 101 # Takes a raster plot object.
COMMAND_SET_SPEED = 200 # sets the speed for the device
COMMAND_SET_POWER = 201 # sets the power. Out of 1000. Unknown power method.
COMMAND_SET_PPI = 203 # sets the PPI power. Out of 1000.
COMMAND_SET_PWM = 203 # sets the PWM power. Out of 1000.
COMMAND_SET_STEP = 205 # sets the raster step for the device
COMMAND_SET_DIRECTION = 209 # sets the directions for the device.
COMMAND_SET_OVERSCAN = 206
COMMAND_SET_D_RATIO = 207 # sets the diagonal_ratio for the device
COMMAND_SET_ACCELERATION = 208 # sets the acceleration for the device 1-4
COMMAND_SET_INCREMENTAL = 210 # sets the commands to be relative to current position
COMMAND_SET_ABSOLUTE = 211 # sets the commands to be absolute positions.
COMMAND_SET_POSITION = 220 # Without moving sets the current position to the given coord.
COMMAND_HOME = 300 # Homes the device
COMMAND_LOCK = 301 # Locks the rail
COMMAND_UNLOCK = 302 # Unlocks the rail.
COMMAND_BEEP = 320 # Beep.
COMMAND_FUNCTION = 350 # Execute the function given by this command. Blocking.
COMMAND_SIGNAL = 360 # Sends the signal, given: "signal_name", operands.
REALTIME_RESET = 1000 # Resets the state, purges buffers
REALTIME_PAUSE = 1010 # Issue a pause command.
REALTIME_RESUME = 1020 # Issue a resume command.
REALTIME_STATUS = 1030 # Issue a status command.
REALTIME_SAFETY_DOOR = 1040 # Issues a forced safety_door state.
REALTIME_JOG_CANCEL = 1050 # Issues a jog cancel. This should cancel any jogging being processed.
REALTIME_SPEED_PERCENT = 1060 # Set the speed to this percent value of total.
REALTIME_RAPID_PERCENT = 1070 # Sets the rapid speed to this percent value of total.
REALTIME_POWER_PERCENT = 1080 # Sets the power to this percent value of total.
REALTIME_SPEED = 1061 # Set the speed to this percent value of total.
REALTIME_RAPID = 1071 # Sets the rapid speed to this percent value of total.
REALTIME_POWER = 1081 # Sets the power to this percent value of total.
REALTIME_OVERSCAN = 1091 # Sets the overscan amount to this value.
REALTIME_LASER_DISABLE = 1100 # Disables the laser.
REALTIME_LASER_ENABLE = 1101 # Enables the laser.
REALTIME_FLOOD_COOLANT = 1210 # Toggle flood coolant
REALTIME_MIST_COOLANT = 1220 # Toggle mist coolant.
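# Hedged illustration (not part of the original module): a spooled job is conceptually a
# sequence of (COMMAND_*, payload) entries consumed by an interpreter. The payload shapes
# below are assumptions for illustration only, apart from the x, y positions described above.
def _example_job():
    return [
        (COMMAND_MODE_PROGRAM, ()),
        (COMMAND_SET_SPEED, (30.0,)),   # device speed
        (COMMAND_SET_POWER, (500,)),    # out of 1000
        (COMMAND_MOVE, (10, 10)),       # reposition without firing
        (COMMAND_CUT, (110, 10)),       # cut a straight segment
        (COMMAND_MODE_RAPID, ()),
        (COMMAND_HOME, ()),
    ]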
| 52.695652 | 117 | 0.764851 |
f71bcbceeb060b7a31ed8e3353c036d8c37f27b4 | 621 | py | Python | supriya/ugens/BRF.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/BRF.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | supriya/ugens/BRF.py | deeuu/supriya | 14fcb5316eccb4dafbe498932ceff56e1abb9d27 | [
"MIT"
] | null | null | null | import collections
from supriya import CalculationRate
from supriya.ugens.Filter import Filter
class BRF(Filter):
"""
A 2nd order Butterworth band-reject filter.
::
>>> source = supriya.ugens.In.ar(bus=0)
        >>> b_r_f = supriya.ugens.BRF.ar(source=source)
>>> b_r_f
BRF.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Filter UGens"
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 440.0), ("reciprocal_of_q", 1.0)]
)
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
| 21.413793 | 79 | 0.645733 |
f71bf52008d28c422bd88b5ecd34e9e1fab1fa11 | 2,202 | py | Python | models/ModelUtil/util.py | Easonyesheng/StereoCameraToolk | 660e43019d0687e96e6da3aca48c1c423ae5abff | [
"MIT"
] | 27 | 2020-10-16T07:21:35.000Z | 2022-03-11T02:56:13.000Z | models/ModelUtil/util.py | Easonyesheng/StereoCamera | 9319b7f4e5ce36833de722a15e1074e82b8b4f84 | [
"MIT"
] | null | null | null | models/ModelUtil/util.py | Easonyesheng/StereoCamera | 9319b7f4e5ce36833de722a15e1074e82b8b4f84 | [
"MIT"
] | 6 | 2021-02-01T09:54:40.000Z | 2022-03-11T03:16:39.000Z | """Utility """
import numpy as np
import cv2
import os
import logging
def check_string_is_empty(string):
"""name
check string empty or not
Args:
Returns:
"""
if string == '':
return True
return False
def check_numpy_array(array):
"""name
check array empty or not
Args:
Returns:
True - Exist
"""
try:
array.all()
except AttributeError:
return False
return True
def after_cv_imshow():
"""name
close all the show window if press 'esc'
set after cv2.imshow()
Args:
Returns:
"""
k = cv2.waitKey(0)
if k == 27:
cv2.destroyAllWindows()
def save_img_with_prefix(img, path, name):
"""name
save as 'path/name.jpg'
Args:
Returns:
"""
cv2.imwrite(os.path.join(path,name+'.jpg'), img)
def img_show(img, name):
"""
"""
cv2.startWindowThread()
img = img / np.max(img)
cv2.imshow(name, img)
after_cv_imshow()
def test_dir_if_not_create(path):
"""name
save as 'path/name.jpg'
Args:
Returns:
"""
if os.path.isdir(path):
return True
else:
print('Create New Folder:', path)
os.makedirs(path)
return True
def log_init(logfilename):
"""name
save as 'path/name.jpg'
Args:
Returns:
"""
# logging.basicConfig(filename=logfilename, level=logging.INFO)
# logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
# filename=logfilename,
# level=logging.DEBUG)
    logger = logging.getLogger()  # no name given: configure the root logger
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s: - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
    # FileHandler: write log records to the file
fh = logging.FileHandler(logfilename, 'w')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
    # StreamHandler: echo log records to the console
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
    # attach both handlers
logger.addHandler(ch)
logger.addHandler(fh) | 18.504202 | 108 | 0.583106 |
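# Hedged usage sketch (not part of the original module): wiring the helpers together.
if __name__ == "__main__":
    test_dir_if_not_create('./logs')   # make sure the output folder exists
    log_init('./logs/run.log')         # DEBUG-level logging to file and console
    logging.info('utilities ready')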
f71c071affad74a0e7aea1a05a898c897c918ab8 | 691 | py | Python | emtract/model_inference.py | dvamossy/EmTract | 68a00e3d63fbc2c401b0d2b297bf96ffb75940e8 | [
"MIT"
] | 16 | 2021-12-02T18:59:56.000Z | 2022-03-31T11:42:12.000Z | emtract/model_inference.py | dvamossy/EmTract | 68a00e3d63fbc2c401b0d2b297bf96ffb75940e8 | [
"MIT"
] | null | null | null | emtract/model_inference.py | dvamossy/EmTract | 68a00e3d63fbc2c401b0d2b297bf96ffb75940e8 | [
"MIT"
] | 1 | 2021-12-09T06:05:22.000Z | 2021-12-09T06:05:22.000Z | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from emtract.model import Model, ModelType
import pandas as pd
class ModelInference:
MODEL_BASE_PATH = 'build/models/'
DATA_BASE_PATH = './emtract/data/'
def __init__(self, model_type):
if model_type == 'twitter':
self.model = Model(ModelType.TWITTER)
else:
self.model = Model(ModelType.STOCK_TWITS)
def inference(self, text):
return self.model.predict([text])
def file_inference(self, file_name, output):
df = pd.read_csv(file_name, header=None)
predictions = self.model.predict(df.iloc[:, 0].values)
predictions.to_csv(output, index=False)
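# Hedged usage sketch (not part of the original module): any model_type other than
# "twitter" falls back to the StockTwits model, as implemented above; the trained model
# artifacts under MODEL_BASE_PATH are assumed to be available.
if __name__ == "__main__":
    mi = ModelInference(model_type="twitter")
    print(mi.inference("to the moon!"))                # emotion scores for a single text
    # mi.file_inference("texts.csv", "emotions.csv")   # batch over a headerless one-column CSV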
| 27.64 | 62 | 0.662808 |
f71c111b67dac5359468b1d2de3970e43bfa4ea3 | 5,551 | py | Python | leetcode_python/Array/longest-arithmetic-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Array/longest-arithmetic-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | leetcode_python/Array/longest-arithmetic-subsequence.py | yennanliu/Python_basics | 6a597442d39468295946cefbfb11d08f61424dc3 | [
"Unlicense"
] | null | null | null | """
1027. Longest Arithmetic Subsequence
Medium
Given an array nums of integers, return the length of the longest arithmetic subsequence in nums.
Recall that a subsequence of an array nums is a list nums[i1], nums[i2], ..., nums[ik] with 0 <= i1 < i2 < ... < ik <= nums.length - 1, and that a sequence seq is arithmetic if seq[i+1] - seq[i] are all the same value (for 0 <= i < seq.length - 1).
Example 1:
Input: nums = [3,6,9,12]
Output: 4
Explanation:
The whole array is an arithmetic sequence with steps of length = 3.
Example 2:
Input: nums = [9,4,7,2,10]
Output: 3
Explanation:
The longest arithmetic subsequence is [4,7,10].
Example 3:
Input: nums = [20,1,15,3,10,5,8]
Output: 4
Explanation:
The longest arithmetic subsequence is [20,15,10,5].
Constraints:
2 <= nums.length <= 1000
0 <= nums[i] <= 500
"""
# V0
# IDEA : DP
class Solution:
def longestArithSeqLength(self, A):
dp = {}
for i in range(len(A)):
for j in range(i + 1, len(A)):
dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1
return max(dp.values())
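# Note on the DP above: dp[(j, d)] holds the length of the longest arithmetic subsequence
# that ends at index j with common difference d. Each pair (i, j) either extends a run
# already ending at i with the same difference, or starts a fresh run of length 2.
# Time and space are both O(n^2) for n = len(A).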
# V0'
# IDEA : HASH TABLE
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution
class Solution:
def longestArithSeqLength(self, A):
aux, cnt, prefix = {a : {} for a in A}, {}, set()
for a in A:
cnt[a] = cnt[a] + 1 if a in cnt else 1
for b in prefix:
if a != b:
aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2
prefix.add(a)
max_const = max(cnt.values())
max_aux = max(max(d.values()) for a, d in aux.items() if d)
return max(max_const, max_aux, 2)
# V1
# https://www.796t.com/article.php?id=154559
# http://www.noteanddata.com/leetcode-1027-Longest-Arithmetic-Sequence-Google-Interview-Problem-java-solution-note.html
# https://blog.csdn.net/w5688414/article/details/109696664
# V1
# IDEA : HASH
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274657/Short-Python-solution
class Solution:
def longestArithSeqLength(self, A):
aux, cnt, prefix = {a : {} for a in A}, {}, set()
for a in A:
cnt[a] = cnt[a] + 1 if a in cnt else 1
for b in prefix:
if a != b:
aux[a][a - b] = 1 + aux[b][a - b] if a - b in aux[b] else 2
prefix.add(a)
max_const = max(cnt.values())
max_aux = max(max(d.values()) for a, d in aux.items() if d)
return max(max_const, max_aux, 2)
# V1'
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/275395/python-O(n**2)-solution
import collections
class Solution:
def longestArithSeqLength(self, A):
# Constant seq: '0000', O(len(A) )
ct = collections.Counter(A)
ans = max(2, max(ct[i] for i in ct))
# Increasing seq:'1234', O(len(A)**2 )
ansdic = {}
for i in range(len(A)):
for j in range(i):
a0, a1, a2 = A[j]*2-A[i], A[j], A[i]
if a0 == a1:continue
if (a0, a1) in ansdic:
ansdic[a1, a2] = ansdic[a0, a1] + 1
ans = max(ansdic[a1, a2], ans)
else:
ansdic[a1, a2] = 2
return ans
# V1''
# IDEA : HASH SET
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274625/simple-hash-Set-Python
class Solution(object):
def longestArithSeqLength(self, A):
res = 2
if len(A) <= 2:
return len(A)
cnt = {}
node = {}
mx = {}
curr = A[1] - A[0]
cnt[(curr,1)] = 2
node[curr] = set()
node[curr].add(1)
mx[curr] = 2
res = 2
for i in range(2,len(A)):
for j in range(i):
dis = A[i] - A[j]
if dis in node:
if j in node[dis]:
cnt[(dis,i)] = cnt[(dis,j)] + 1
#node[dis].remove(j)
node[dis].add(i)
mx[dis] = max(mx[dis], cnt[(dis,i)])
res = max(mx[dis],res)
else:
cnt[(dis,i)] = 2
node[dis].add(i)
else:
cnt[(dis,i)] = 2
node[dis] = set()
node[dis].add(i)
mx[dis] = 2
return res
# V1'''
# IDEA : DP
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/274611/JavaC%2B%2BPython-DP
class Solution:
def longestArithSeqLength(self, A):
dp = {}
for i in range(len(A)):
for j in range(i + 1, len(A)):
dp[j, A[j] - A[i]] = dp.get((i, A[j] - A[i]), 1) + 1
return max(dp.values())
# V1''''
# IDEA : DP
# https://leetcode.com/problems/longest-arithmetic-subsequence/discuss/514742/Python-DP
class Solution:
def longestArithSeqLength(self, A):
DP = {}
A_len = len(A)
for right in range(1, A_len):
for left in range(right):
diff = A[right] - A[left]
#if (diff, left) in DP:
# DP[(diff, right)] = DP[(diff, left)] + 1
#else:
# DP[(diff, right)] = 2
DP[(diff, right)] = DP.get((diff,left), 1) + 1
return max(DP.values())
# V2 | 31.361582 | 248 | 0.503513 |
f71c43a114c10204ad2b5ff2693265ff01dc5894 | 64 | py | Python | dr/__init__.py | jigangkim/domain_randomization | 07a309a9e824b5332219871abe8f0f657694b292 | [
"MIT"
] | 7 | 2019-06-09T13:03:18.000Z | 2022-02-19T08:50:51.000Z | dr/__init__.py | jigangkim/domain_randomization | 07a309a9e824b5332219871abe8f0f657694b292 | [
"MIT"
] | 1 | 2019-07-12T23:59:46.000Z | 2021-11-21T04:09:09.000Z | dr/__init__.py | jigangkim/domain_randomization | 07a309a9e824b5332219871abe8f0f657694b292 | [
"MIT"
] | 3 | 2020-05-01T13:18:25.000Z | 2021-03-30T11:52:33.000Z | import dr.dist
import dr.experiment
import dr.gym
import dr.ppo
| 12.8 | 20 | 0.8125 |
f71c525adccaf762f27c3b40fe128ecd416463bd | 6,309 | py | Python | dico/base/model.py | dico-api/dico | 0bb80e2bb8dd66bb5078e52c9e37c180b7c80319 | [
"MIT"
] | 37 | 2021-08-23T00:16:42.000Z | 2022-02-22T23:19:47.000Z | dico/base/model.py | dico-api/dico | 0bb80e2bb8dd66bb5078e52c9e37c180b7c80319 | [
"MIT"
] | 14 | 2021-08-07T09:01:29.000Z | 2022-02-27T15:19:36.000Z | dico/base/model.py | eunwoo1104/dico | 0bb80e2bb8dd66bb5078e52c9e37c180b7c80319 | [
"MIT"
] | 9 | 2021-08-25T04:14:05.000Z | 2022-02-27T15:08:49.000Z | import copy
import typing
from ..model.snowflake import Snowflake
if typing.TYPE_CHECKING:
from ..api import APIClient
class CopyableObject:
def copy(self):
return copy.deepcopy(self)
class EventBase:
def __init__(self, client: "APIClient", resp: dict):
self.raw: dict = resp
self.client: "APIClient" = client
self._dont_dispatch: bool = False
@classmethod
def create(cls, client, resp: dict):
return cls(client, resp)
class DiscordObjectBase(CopyableObject):
TYPING = typing.Union[
int, str, Snowflake, "DiscordObjectBase", typing.Type["DiscordObjectBase"]
]
RESPONSE = typing.Union["DiscordObjectBase", typing.Awaitable["DiscordObjectBase"]]
RESPONSE_AS_LIST = typing.Union[
typing.List["DiscordObjectBase"],
typing.Awaitable[typing.List["DiscordObjectBase"]],
]
_cache_type = None
def __init__(self, client: "APIClient", resp: dict, **kwargs: typing.Any):
resp.update(kwargs)
# self._cache_type = None
self.raw: dict = resp.copy()
self.id: Snowflake = Snowflake(resp["id"])
self.client: "APIClient" = client
def __int__(self) -> int:
return int(self.id)
def __eq__(self, other):
return int(self.id) == int(other)
def __ne__(self, other):
return int(self.id) != int(other)
def __hash__(self):
return hash(self.id)
def update(self, new_resp: dict, **kwargs: typing.Any):
orig = self.raw
for k, v in new_resp.items():
if orig.get(k) != v:
orig[k] = v
self.__init__(self.client, orig, **kwargs)
@classmethod
def create(cls, client: "APIClient", resp: dict, **kwargs: typing.Any):
ensure_cache_type = kwargs.pop("ensure_cache_type", cls._cache_type)
prevent_caching = kwargs.pop("prevent_caching", False)
maybe_exist = client.has_cache and client.cache.get(
resp["id"], ensure_cache_type
)
if maybe_exist:
if prevent_caching:
maybe_exist = maybe_exist.copy()
maybe_exist.update(resp, **kwargs)
"""
orig = maybe_exist.raw
for k, v in resp.items():
if orig.get(k) != v:
orig[k] = v
maybe_exist.__init__(client, orig, **kwargs)
"""
return maybe_exist
else:
ret = cls(client, resp, **kwargs)
if client.has_cache and not prevent_caching:
client.cache.add(ret.id, ret._cache_type, ret)
if hasattr(ret, "guild_id") and ret.guild_id:
client.cache.get_guild_container(ret.guild_id).add(
ret.id, ret._cache_type, ret
)
return ret
class AbstractObject(dict):
RESPONSE = typing.Union["AbstractObject", typing.Awaitable["AbstractObject"]]
RESPONSE_AS_LIST = typing.Union[
typing.List["AbstractObject"], typing.Awaitable[typing.List["AbstractObject"]]
]
def __init__(self, resp: dict):
super().__init__(**resp)
def __getattr__(self, item):
return self.get(item)
def __setattr__(self, key, value):
self[key] = value
class FlagBase:
def __init__(self, *args: str, **kwargs: bool):
self.values: typing.Dict[str, int] = {
x: getattr(self, x) for x in dir(self) if isinstance(getattr(self, x), int)
}
self.value: int = 0
for x in args:
if x.upper() not in self.values:
raise AttributeError(f"invalid name: `{x}`")
self.value |= self.values[x.upper()]
for k, v in kwargs.items():
if k.upper() not in self.values:
raise AttributeError(f"invalid name: `{k}`")
if v:
self.value |= self.values[k.upper()]
def __int__(self) -> int:
return self.value
def __getattr__(self, item):
if item.startswith("__"):
return self.__getattribute__(item)
return self.has(item)
def __iter__(self):
for k, v in self.values.items():
if self.has(k):
yield v
def has(self, name: str) -> bool:
if name.upper() not in self.values:
raise AttributeError(f"invalid name: `{name}`")
return (self.value & self.values[name.upper()]) == self.values[name.upper()]
def __setattr__(self, key, value):
orig = key
key = key.upper()
if orig in ["value", "values"] or key not in self.values.keys():
return super().__setattr__(orig, value)
if not isinstance(value, bool):
raise TypeError(f"only type `bool` is supported.")
has_value = self.has(key)
if value and not has_value:
self.value |= self.values[key]
elif not value and has_value:
self.value &= ~self.values[key]
def add(self, value: str):
return self.__setattr__(value, True)
def remove(self, value: str):
return self.__setattr__(value, False)
@classmethod
def from_value(cls, value: int):
ret = cls()
ret.value = value
return ret
class TypeBase:
def __init__(self, value):
self.values: typing.Dict[int, str] = {
getattr(self, x): x for x in dir(self) if isinstance(getattr(self, x), int)
}
self.value: int = value
if self.value not in self.values:
raise AttributeError(f"invalid value: {value}")
def __str__(self) -> str:
return self.values[self.value]
def __int__(self) -> int:
return self.value
def __getattr__(self, item):
if item.startswith("__"):
return self.__getattribute__(item)
return self.is_type(item)
def is_type(self, name: str) -> bool:
values = {y: x for x, y in self.values.items()}
if name.upper() not in values:
raise AttributeError(f"invalid name: `{name}`")
return self.value == values[name.upper()]
@classmethod
def to_string(cls, value: int) -> str:
values = {
getattr(cls, x): x for x in dir(cls) if isinstance(getattr(cls, x), int)
}
return values.get(value)
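# Hedged illustration (not part of the original module): minimal FlagBase/TypeBase
# subclasses showing how integer class attributes become named bit flags and named types.
if __name__ == "__main__":
    class Permissions(FlagBase):
        READ = 1 << 0
        WRITE = 1 << 1

    class ChannelKind(TypeBase):
        TEXT = 0
        VOICE = 2

    perms = Permissions("read")               # set a flag by name
    perms.write = True                        # set another bit via attribute assignment
    print(int(perms), perms.has("write"))     # 3 True

    kind = ChannelKind(2)
    print(str(kind), kind.is_type("voice"))   # VOICE True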
| 31.232673 | 87 | 0.579648 |
f71c54fd629fe7b2eed4f7bb9d796bb0a3a708f2 | 17,077 | py | Python | ignite/contrib/engines/common.py | VinhLoiIT/ignite | 3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56 | [
"BSD-3-Clause"
] | 1 | 2020-11-08T16:27:24.000Z | 2020-11-08T16:27:24.000Z | ignite/contrib/engines/common.py | VinhLoiIT/ignite | 3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56 | [
"BSD-3-Clause"
] | null | null | null | ignite/contrib/engines/common.py | VinhLoiIT/ignite | 3b2b9655ea9f80ce49b8a9f1c2d72f80e2a95f56 | [
"BSD-3-Clause"
] | null | null | null | from functools import partial
import warnings
import numbers
from collections.abc import Sequence, Mapping
import torch
import torch.distributed as dist
from ignite.engine import Engine, Events
from ignite.metrics import RunningAverage
from ignite.handlers import TerminateOnNan, ModelCheckpoint, EarlyStopping
from ignite.contrib.metrics import GpuInfo
from ignite.contrib.handlers import ProgressBar
from ignite.contrib.handlers import VisdomLogger
from ignite.contrib.handlers import TensorboardLogger, global_step_from_engine
import ignite.contrib.handlers.tensorboard_logger as tb_logger_module
import ignite.contrib.handlers.visdom_logger as visdom_logger_module
from ignite.contrib.handlers import MLflowLogger
import ignite.contrib.handlers.mlflow_logger as mlflow_logger_module
from ignite.contrib.handlers import PolyaxonLogger
import ignite.contrib.handlers.polyaxon_logger as polyaxon_logger_module
def setup_common_training_handlers(
trainer,
train_sampler=None,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=False,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
"""Helper method to setup trainer with common handlers (it also supports distributed configuration):
- :class:`~ignite.handlers.TerminateOnNan`
- handler to setup learning rate scheduling
- :class:`~ignite.handlers.ModelCheckpoint`
- :class:`~ignite.metrics.RunningAverage` on `update_function` output
- Two progress bars on epochs and optionally on iterations
Args:
trainer (Engine): trainer engine. Output of trainer's `update_function` should be a dictionary
or sequence or a single tensor.
train_sampler (torch.utils.data.DistributedSampler, optional): Optional distributed sampler used to call
`set_epoch` method on epoch started event.
to_save (dict, optional): dictionary with objects to save in the checkpoint. This is used with
:class:`~ignite.handlers.ModelCheckpoint`.
save_every_iters (int, optional): saving interval. By default, `to_save` objects are stored
each 1000 iterations.
output_path (str, optional): output path to indicate where `to_save` objects are stored.
lr_scheduler (ParamScheduler or subclass of `torch.optim.lr_scheduler._LRScheduler`): learning rate scheduler
as native torch LRScheduler or ignite's parameter scheduler.
with_gpu_stats (bool, optional): if True, :class:`~ignite.contrib.metrics.handlers.GpuInfo` is attached to the
trainer. This requires `pynvml` package to be installed.
output_names (list/tuple): list of names associated with `update_function` output dictionary.
with_pbars (bool, optional): if True, two progress bars on epochs and optionally on iterations are attached
with_pbar_on_iters (bool, optional): if True, a progress bar on iterations is attached to the trainer.
log_every_iters (int, optional): logging interval for :class:`~ignite.contrib.metrics.handlers.GpuInfo` and for
epoch-wise progress bar.
        device (str or torch.device, optional): Optional device specification in case of distributed computation usage.
"""
kwargs = dict(
to_save=to_save,
save_every_iters=save_every_iters,
output_path=output_path,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
device=device,
)
if dist.is_available() and dist.is_initialized():
_setup_common_distrib_training_handlers(trainer, train_sampler=train_sampler, **kwargs)
else:
if train_sampler is not None:
warnings.warn(
"Argument train_sampler distributed sampler used to call `set_epoch` method on epoch "
"started event, but no distributed setting detected",
UserWarning,
)
_setup_common_training_handlers(trainer, **kwargs)
setup_common_distrib_training_handlers = setup_common_training_handlers
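# Hedged usage sketch (not part of the original module): the trainer, model, optimizer and
# scheduler below are assumed to come from the user's training script; the output names must
# match whatever the trainer's update function returns.
def _example_common_handlers(trainer, model, optimizer, lr_scheduler):
    setup_common_training_handlers(
        trainer,
        to_save={"model": model, "optimizer": optimizer},
        output_path="/tmp/checkpoints",
        lr_scheduler=lr_scheduler,
        output_names=["batch_loss"],
        with_pbars=True,
        log_every_iters=50,
    )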
def _setup_common_training_handlers(
trainer,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=True,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
if lr_scheduler is not None:
if isinstance(lr_scheduler, torch.optim.lr_scheduler._LRScheduler):
trainer.add_event_handler(Events.ITERATION_COMPLETED, lambda engine: lr_scheduler.step())
else:
trainer.add_event_handler(Events.ITERATION_STARTED, lr_scheduler)
trainer.add_event_handler(Events.EPOCH_COMPLETED, empty_cuda_cache)
if to_save is not None:
if output_path is None:
raise ValueError("If to_save argument is provided then output_path argument should be also defined")
checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix="training")
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)
if with_gpu_stats:
GpuInfo().attach(trainer, name="gpu", event_name=Events.ITERATION_COMPLETED(every=log_every_iters))
if output_names is not None:
def output_transform(x, index, name):
if isinstance(x, Mapping):
return x[name]
elif isinstance(x, Sequence):
return x[index]
elif isinstance(x, (torch.Tensor, numbers.Number)):
return x
else:
raise ValueError(
"Unhandled type of update_function's output. "
"It should either mapping or sequence, but given {}".format(type(x))
)
for i, n in enumerate(output_names):
RunningAverage(
output_transform=partial(output_transform, index=i, name=n), epoch_bound=False, device=device
).attach(trainer, n)
if with_pbars:
if with_pbar_on_iters:
ProgressBar(persist=False).attach(
trainer, metric_names="all", event_name=Events.ITERATION_COMPLETED(every=log_every_iters)
)
ProgressBar(persist=True, bar_format="").attach(
trainer, event_name=Events.EPOCH_STARTED, closing_event_name=Events.COMPLETED
)
def _setup_common_distrib_training_handlers(
trainer,
train_sampler=None,
to_save=None,
save_every_iters=1000,
output_path=None,
lr_scheduler=None,
with_gpu_stats=True,
output_names=None,
with_pbars=True,
with_pbar_on_iters=True,
log_every_iters=100,
device="cuda",
):
if not (dist.is_available() and dist.is_initialized()):
raise RuntimeError("Distributed setting is not initialized, please call `dist.init_process_group` before.")
_setup_common_training_handlers(
trainer,
to_save=None,
lr_scheduler=lr_scheduler,
with_gpu_stats=with_gpu_stats,
output_names=output_names,
with_pbars=(dist.get_rank() == 0) and with_pbars,
with_pbar_on_iters=with_pbar_on_iters,
log_every_iters=log_every_iters,
device=device,
)
if train_sampler is not None:
if not callable(getattr(train_sampler, "set_epoch", None)):
raise TypeError("Train sampler should have `set_epoch` method")
@trainer.on(Events.EPOCH_STARTED)
def distrib_set_epoch(engine):
train_sampler.set_epoch(engine.state.epoch - 1)
if dist.get_rank() == 0:
if to_save is not None:
if output_path is None:
raise ValueError("If to_save argument is provided then output_path argument should be also defined")
checkpoint_handler = ModelCheckpoint(dirname=output_path, filename_prefix="training")
trainer.add_event_handler(Events.ITERATION_COMPLETED(every=save_every_iters), checkpoint_handler, to_save)
def empty_cuda_cache(_):
torch.cuda.empty_cache()
import gc
gc.collect()
def setup_any_logging(logger, logger_module, trainer, optimizers, evaluators, log_every_iters):
if optimizers is not None:
from torch.optim.optimizer import Optimizer
if not isinstance(optimizers, (Optimizer, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if evaluators is not None:
if not isinstance(evaluators, (Engine, Mapping)):
raise TypeError("Argument optimizers should be either a single optimizer or a dictionary or optimizers")
if log_every_iters is None:
log_every_iters = 1
logger.attach(
trainer,
log_handler=logger_module.OutputHandler(tag="training", metric_names="all"),
event_name=Events.ITERATION_COMPLETED(every=log_every_iters),
)
if optimizers is not None:
# Log optimizer parameters
if isinstance(optimizers, Optimizer):
optimizers = {None: optimizers}
for k, optimizer in optimizers.items():
logger.attach(
trainer,
log_handler=logger_module.OptimizerParamsHandler(optimizer, param_name="lr", tag=k),
event_name=Events.ITERATION_STARTED(every=log_every_iters),
)
if evaluators is not None:
# Log evaluation metrics
if isinstance(evaluators, Engine):
evaluators = {"validation": evaluators}
for k, evaluator in evaluators.items():
gst = global_step_from_engine(trainer)
logger.attach(
evaluator,
log_handler=logger_module.OutputHandler(tag=k, metric_names="all", global_step_transform=gst),
event_name=Events.COMPLETED,
)
def setup_tb_logging(output_path, trainer, optimizers=None, evaluators=None, log_every_iters=100):
"""Method to setup TensorBoard logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
output_path (str): logging directory path
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
Returns:
TensorboardLogger
"""
tb_logger = TensorboardLogger(log_dir=output_path)
setup_any_logging(tb_logger, tb_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters)
return tb_logger
def setup_visdom_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100, **kwargs):
"""Method to setup Visdom logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
**kwargs: kwargs to pass into VisdomLogger
Returns:
VisdomLogger
"""
vis_logger = VisdomLogger(**kwargs)
setup_any_logging(
vis_logger, visdom_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return vis_logger
def setup_mlflow_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):
"""Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
Returns:
MLflowLogger
"""
mlflow_logger = MLflowLogger()
setup_any_logging(
mlflow_logger, mlflow_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return mlflow_logger
def setup_plx_logging(trainer, optimizers=None, evaluators=None, log_every_iters=100):
"""Method to setup MLflow logging on trainer and a list of evaluators. Logged metrics are:
- Training metrics, e.g. running average loss values
- Learning rate(s)
- Evaluation metrics
Args:
trainer (Engine): trainer engine
optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of
torch optimizers. If a dictionary, keys are used as tags arguments for logging.
evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary,
keys are used as tags arguments for logging.
log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration,
value can be set to 1 or None.
Returns:
PolyaxonLogger
"""
plx_logger = PolyaxonLogger()
setup_any_logging(
plx_logger, polyaxon_logger_module, trainer, optimizers, evaluators, log_every_iters=log_every_iters
)
return plx_logger
def get_default_score_fn(metric_name):
def wrapper(engine):
score = engine.state.metrics[metric_name]
return score
return wrapper
def save_best_model_by_val_score(output_path, evaluator, model, metric_name, n_saved=3, trainer=None, tag="val"):
"""Method adds a handler to `evaluator` to save best models based on the score (named by `metric_name`)
provided by `evaluator`.
Args:
output_path (str): output path to indicate where to save best models
evaluator (Engine): evaluation engine used to provide the score
model (nn.Module): model to store
metric_name (str): metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
n_saved (int, optional): number of best models to store
trainer (Engine, optional): trainer engine to fetch the epoch when saving the best model.
tag (str, optional): score name prefix: `{tag}_{metric_name}`. By default, tag is "val".
"""
global_step_transform = None
if trainer is not None:
global_step_transform = global_step_from_engine(trainer)
best_model_handler = ModelCheckpoint(
dirname=output_path,
filename_prefix="best",
n_saved=n_saved,
global_step_transform=global_step_transform,
score_name="{}_{}".format(tag, metric_name.lower()),
score_function=get_default_score_fn(metric_name),
)
evaluator.add_event_handler(Events.COMPLETED, best_model_handler, {"model": model,})
def add_early_stopping_by_val_score(patience, evaluator, trainer, metric_name):
"""Method setups early stopping handler based on the score (named by `metric_name`) provided by `evaluator`.
Args:
patience (int): number of events to wait if no improvement and then stop the training.
evaluator (Engine): evaluation engine used to provide the score
trainer (Engine): trainer engine to stop the run if no improvement.
metric_name (str): metric name to use for score evaluation. This metric should be present in
`evaluator.state.metrics`.
"""
es_handler = EarlyStopping(patience=patience, score_function=get_default_score_fn(metric_name), trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, es_handler)
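# Hedged usage sketch (not part of the original module): attach best-model checkpointing and
# early stopping together, both driven by an "accuracy" metric assumed to be attached to `evaluator`.
def _example_validation_handlers(trainer, evaluator, model):
    save_best_model_by_val_score(
        "/tmp/checkpoints", evaluator, model, metric_name="accuracy", n_saved=3, trainer=trainer
    )
    add_early_stopping_by_val_score(
        patience=10, evaluator=evaluator, trainer=trainer, metric_name="accuracy"
    )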
| 42.061576 | 119 | 0.701997 |
f71c77d1c0f627d4c0d8120689ae89c7e1a43d86 | 2,577 | py | Python | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 13 | 2018-12-07T21:02:20.000Z | 2019-02-22T14:36:31.000Z | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 43 | 2018-11-30T11:31:43.000Z | 2019-04-03T16:09:06.000Z | agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py | cicorias/agogosml | 60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1 | [
"MIT"
] | 13 | 2018-11-29T00:31:29.000Z | 2019-02-22T18:50:28.000Z | import json
import os
import sys
import time
from agogosml.common.abstract_streaming_client import find_streaming_clients
from agogosml.tools.sender import send
from agogosml.tools.receiver import receive
eh_base_config = {
"EVENT_HUB_NAMESPACE": os.getenv("EVENT_HUB_NAMESPACE"),
"EVENT_HUB_NAME": os.getenv("EVENT_HUB_NAME_INPUT"),
"EVENT_HUB_SAS_POLICY": os.getenv("EVENT_HUB_SAS_POLICY"),
"EVENT_HUB_SAS_KEY": os.getenv("EVENT_HUB_SAS_KEY_INPUT"),
}
eh_send_config = {
**eh_base_config,
'LEASE_CONTAINER_NAME': os.getenv('LEASE_CONTAINER_NAME_INPUT')
}
eh_receive_config = {
**eh_base_config,
"AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
"AZURE_STORAGE_ACCESS_KEY": os.getenv("AZURE_STORAGE_ACCESS_KEY"),
"LEASE_CONTAINER_NAME": os.getenv("LEASE_CONTAINER_NAME_OUTPUT"),
"EVENT_HUB_CONSUMER_GROUP": os.getenv("EVENT_HUB_CONSUMER_GROUP"),
"TIMEOUT": 10,
}
kafka_base_config = {
'KAFKA_ADDRESS': os.getenv("KAFKA_ADDRESS"),
'TIMEOUT': os.getenv('KAFKA_TIMEOUT'),
# These configs are specific to Event Hub Head for Kafka
'EVENTHUB_KAFKA_CONNECTION_STRING': os.getenv('EVENTHUB_KAFKA_CONNECTION_STRING'),
'SSL_CERT_LOCATION': os.getenv('SSL_CERT_LOCATION') # /usr/local/etc/openssl/cert.pem
}
kafka_receive_config = {
**kafka_base_config,
'KAFKA_CONSUMER_GROUP': os.getenv('KAFKA_CONSUMER_GROUP'),
}
kafka_send_config = {
**kafka_base_config,
'KAFKA_TOPIC': os.getenv('KAFKA_TOPIC_INPUT')
}
def put_messages_on_input_queue(msg_type: str):
with open('test_messages.json', encoding='utf-8') as f:
test_messages = json.load(f)
send_client = find_streaming_clients()[msg_type]
send_config = {**eh_send_config, **kafka_send_config}
send(test_messages, send_client, send_config)
def receive_messages_on_queue(kafka_topic: str, msg_type: str):
receive_client = find_streaming_clients()[msg_type]
receive_config = {**eh_receive_config, **kafka_receive_config, **{'KAFKA_TOPIC': os.getenv(kafka_topic)}}
return receive(sys.stdout, receive_client, receive_config)
def cli():
msg_type = os.getenv("MESSAGING_TYPE")
put_messages_on_input_queue(msg_type)
time.sleep(3)
input_received = receive_messages_on_queue('KAFKA_TOPIC_INPUT', msg_type)
print(input_received)
time.sleep(20)
output_received = receive_messages_on_queue('KAFKA_TOPIC_OUTPUT', msg_type)
print(output_received)
if output_received == "[]":
sys.exit(1)
else:
sys.exit(0)
if __name__ == "__main__":
cli()
| 28.955056 | 109 | 0.73962 |
f71c8d37ae326e29cdf957282fbbe1c51cf54ac4 | 1,004 | py | Python | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | null | null | null | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | 4 | 2020-10-09T15:59:09.000Z | 2020-11-18T08:34:44.000Z | src/slack.py | villoro/airflow_tasks | 81bd892744a9bbbf6e01903649b6c3786a955a5a | [
"MIT"
] | null | null | null | import json
import requests
from utils import get_secret
from utils import is_pro
def send_slack(text="", channel="test", blocks=None):
assert channel in ["test", "events", "general"]
webhook = get_secret(f"SLACK_WEBHOOK_{channel.upper()}")
data = {"text": text}
if blocks:
data["blocks"] = blocks
res = requests.post(
webhook, data=json.dumps(data), headers={"Content-Type": "application/json"}
)
res.raise_for_status()
def slack_state_handler(task, old_state, new_state):
if not new_state.is_finished():
return new_state
failure = new_state.is_failed()
# Prepare message
if failure:
msg = f"*{task.name}:* :x:"
else:
msg = f"*{task.name}:* {task.duration} :heavy_check_mark:"
# Notify result
send_slack(msg, channel="events" if is_pro() else "test")
# In pro notify about failures in general
if failure and is_pro():
send_slack(msg, channel="general")
return new_state
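# Hedged usage sketch (not part of the original module): a plain notification call; the channel
# must be one of "test", "events" or "general" as asserted above, and the matching
# SLACK_WEBHOOK_* secret is assumed to be configured.
if __name__ == "__main__":
    send_slack("deployment finished :tada:", channel="test")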
| 21.361702 | 84 | 0.644422 |
f71c8f6aa2a62ab271f35e5e3080e58ef457c6cb | 782 | py | Python | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | examples/kmeansHeightWeight.py | Duane321/pyprobml | 6d0ba29f22dc7fec9dfc73788bc5520e97663bdb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
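# Fit K-means with k = 1, 2, 3 to the height/weight data and save a scatter plot of the
# resulting clusters for each k.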
import matplotlib.pyplot as pl
import numpy as np
from utils import util
from sklearn.cluster import KMeans
from utils.util import save_fig
data = util.load_mat('heightWeight/heightWeight')
data = data['heightWeightData']
markers = 'Dox'
colors = 'rgb'
for i in range(3):
KM_model = KMeans(init='k-means++', n_clusters=i+1)
labels = KM_model.fit_predict(data[:, [1, 2]])
labels_unique = np.unique(labels)
fig = pl.figure(i)
for j in range(len(labels_unique)):
data_chosen = data[labels == labels_unique[j]]
pl.scatter(data_chosen[:, 1], data_chosen[:, 2],
marker=markers[j],
color=colors[j])
pl.title('k = %s' % (i+1))
save_fig('kmeansHeightWeight_%s.png' % (i+1))
pl.show()
| 28.962963 | 56 | 0.644501 |
f71d2652af9afa17cad4beb33592ed002af1f665 | 4,040 | py | Python | alipay/aop/api/request/AlipayCommerceEducateCampusBiztaskFinishRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayCommerceEducateCampusBiztaskFinishRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AlipayCommerceEducateCampusBiztaskFinishRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayCommerceEducateCampusBiztaskFinishModel import AlipayCommerceEducateCampusBiztaskFinishModel
class AlipayCommerceEducateCampusBiztaskFinishRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayCommerceEducateCampusBiztaskFinishModel):
self._biz_content = value
else:
self._biz_content = AlipayCommerceEducateCampusBiztaskFinishModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.commerce.educate.campus.biztask.finish'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 27.862069 | 148 | 0.650495 |
f71d6f2d262c160937a5abfacbcec8080a0ea116 | 228 | py | Python | algorithms/tree_diameter.py | nazaruka/Catan-AI | d7c91c7011ac82ad41d3b80a29c73c23ab7b579e | [
"MIT"
] | 9 | 2019-07-16T15:28:00.000Z | 2022-02-08T00:53:39.000Z | algorithms/tree_diameter.py | nazaruka/Catan-AI | d7c91c7011ac82ad41d3b80a29c73c23ab7b579e | [
"MIT"
] | 2 | 2020-01-09T15:27:58.000Z | 2020-09-23T18:04:41.000Z | algorithms/tree_diameter.py | nazaruka/Catan-AI | d7c91c7011ac82ad41d3b80a29c73c23ab7b579e | [
"MIT"
] | 5 | 2019-08-07T16:51:38.000Z | 2021-05-06T04:01:32.000Z | import networkx
from algorithms.dfs import dfs
def tree_diameter(t: networkx.Graph):
if __debug__:
assert networkx.is_tree(t)
v, _ = dfs(t)
_, longest_path_length = dfs(t, v)
return longest_path_length
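# Note: this is the classic two-pass technique — a DFS from an arbitrary start node finds the
# farthest vertex v, which in a tree is always an endpoint of some diameter; a second DFS from
# v therefore returns the diameter length itself (here `dfs` is assumed to return the farthest
# vertex reached and the longest path length found).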
| 20.727273 | 38 | 0.697368 |
f71d7ff644e054571c43b78fa96b7e2e5f88fb9d | 22,213 | py | Python | sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py | T3p/baselines | 5623c9160d1e86ebca3e673f142fe6b14a1db06c | [
"MIT"
] | 2 | 2020-08-01T18:19:05.000Z | 2021-06-30T06:37:23.000Z | sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py | T3p/baselines | 5623c9160d1e86ebca3e673f142fe6b14a1db06c | [
"MIT"
] | null | null | null | sacred_mis/_sources/pomis2_57be95a71b575624c33c6ffe64e50d6e.py | T3p/baselines | 5623c9160d1e86ebca3e673f142fe6b14a1db06c | [
"MIT"
] | 5 | 2018-11-24T16:29:39.000Z | 2021-12-10T03:07:07.000Z | import numpy as np
import warnings
import baselines.common.tf_util as U
import tensorflow as tf
import time
from baselines.common import zipsame, colorize
from contextlib import contextmanager
from collections import deque
from baselines import logger
from baselines.common.cg import cg
from baselines.pomis2.memory import Memory
from baselines.common.centralized_sampler import traj_segment_generator
from baselines.pois.utils import cluster_rewards
@contextmanager
def timed(msg):
print(colorize(msg, color='magenta'))
tstart = time.time()
yield
print(colorize('done in %.3f seconds'%(time.time() - tstart), color='magenta'))
def update_epsilon(delta_bound, epsilon_old, max_increase=2.):
if delta_bound > (1. - 1. / (2 * max_increase)) * epsilon_old:
return epsilon_old * max_increase
else:
return epsilon_old ** 2 / (2 * (epsilon_old - delta_bound))
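# Note on the step-size update above: with alpha = 1 / ||g||^2 (see optimize_offline), the
# first-order gain of a step epsilon is approximately epsilon itself, so the bound improvement
# is modelled as delta(eps) ~= eps - a * eps**2. Fitting a to the observed pair
# (epsilon_old, delta_bound) gives a = (epsilon_old - delta_bound) / epsilon_old**2, and the
# value returned in the else branch is the maximizer 1 / (2 * a) of that parabola; when the
# observed gain is already close to the model's maximum, epsilon is expanded by max_increase.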
def line_search_parabola(theta_init, alpha, natural_gradient, set_parameter, evaluate_bound, delta_bound_tol=1e-4, max_line_search_ite=30):
epsilon = 1.
epsilon_old = 0.
delta_bound_old = -np.inf
bound_init = evaluate_bound()
theta_old = theta_init
for i in range(max_line_search_ite):
theta = theta_init + epsilon * alpha * natural_gradient
set_parameter(theta)
bound = evaluate_bound()
if np.isnan(bound):
warnings.warn('Got NaN bound value: rolling back!')
return theta_old, epsilon_old, delta_bound_old, i + 1
delta_bound = bound - bound_init
epsilon_old = epsilon
epsilon = update_epsilon(delta_bound, epsilon_old)
if delta_bound <= delta_bound_old + delta_bound_tol:
if delta_bound_old < 0.:
return theta_init, 0., 0., i+1
else:
return theta_old, epsilon_old, delta_bound_old, i+1
delta_bound_old = delta_bound
theta_old = theta
return theta_old, epsilon_old, delta_bound_old, i+1
def line_search_binary(theta_init, alpha, natural_gradient, set_parameter, evaluate_loss, delta_bound_tol=1e-4, max_line_search_ite=30):
low = 0.
high = None
bound_init = evaluate_loss()
delta_bound_old = 0.
theta_opt = theta_init
i_opt = 0
delta_bound_opt = 0.
epsilon_opt = 0.
epsilon = 1.
for i in range(max_line_search_ite):
theta = theta_init + epsilon * natural_gradient * alpha
set_parameter(theta)
bound = evaluate_loss()
delta_bound = bound - bound_init
if np.isnan(bound):
warnings.warn('Got NaN bound value: rolling back!')
if np.isnan(bound) or delta_bound <= delta_bound_opt:
high = epsilon
else:
low = epsilon
theta_opt = theta
delta_bound_opt = delta_bound
i_opt = i
epsilon_opt = epsilon
epsilon_old = epsilon
if high is None:
epsilon *= 2
else:
epsilon = (low + high) / 2.
if abs(epsilon_old - epsilon) < 1e-12:
break
return theta_opt, epsilon_opt, delta_bound_opt, i_opt+1
def optimize_offline(theta_init, set_parameter, line_search, evaluate_loss, evaluate_gradient, evaluate_natural_gradient=None, gradient_tol=1e-4, bound_tol=1e-4, max_offline_ite=100):
theta = theta_old = theta_init
improvement = improvement_old = 0.
set_parameter(theta)
'''
bound_init = evaluate_loss()
import scipy.optimize as opt
def func(x):
set_parameter(x)
return -evaluate_loss()
def grad(x):
set_parameter(x)
return -evaluate_gradient().astype(np.float64)
theta, bound, d = opt.fmin_l_bfgs_b(func=func,
fprime=grad,
x0=theta_init.astype(np.float64),
maxiter=100,
)
print(bound_init, bound)
print(d)
set_parameter(theta)
improvement = bound_init + bound
return theta, improvement
'''
fmtstr = '%6i %10.3g %10.3g %18i %18.3g %18.3g %18.3g'
titlestr = '%6s %10s %10s %18s %18s %18s %18s'
print(titlestr % ('iter', 'epsilon', 'step size', 'num line search', 'gradient norm', 'delta bound ite', 'delta bound tot'))
for i in range(max_offline_ite):
bound = evaluate_loss()
gradient = evaluate_gradient()
if np.any(np.isnan(gradient)):
warnings.warn('Got NaN gradient! Stopping!')
set_parameter(theta_old)
return theta_old, improvement
if np.isnan(bound):
warnings.warn('Got NaN bound! Stopping!')
set_parameter(theta_old)
return theta_old, improvement_old
if evaluate_natural_gradient is not None:
natural_gradient = evaluate_natural_gradient(gradient)
else:
natural_gradient = gradient
if np.dot(gradient, natural_gradient) < 0:
warnings.warn('NatGradient dot Gradient < 0! Using vanilla gradient')
natural_gradient = gradient
gradient_norm = np.sqrt(np.dot(gradient, natural_gradient))
if gradient_norm < gradient_tol:
print('stopping - gradient norm < gradient_tol')
return theta, improvement
alpha = 1. / gradient_norm ** 2
theta_old = theta
improvement_old = improvement
theta, epsilon, delta_bound, num_line_search = line_search(theta, alpha, natural_gradient, set_parameter, evaluate_loss)
set_parameter(theta)
improvement += delta_bound
print(fmtstr % (i+1, epsilon, alpha*epsilon, num_line_search, gradient_norm, delta_bound, improvement))
if delta_bound < bound_tol:
print('stopping - delta bound < bound_tol')
return theta, improvement
return theta, improvement
def learn(env, make_policy, *,
n_episodes,
horizon,
delta,
gamma,
max_iters,
sampler=None,
use_natural_gradient=False, #can be 'exact', 'approximate'
fisher_reg=1e-2,
iw_method='is',
iw_norm='none',
bound='J',
line_search_type='parabola',
save_weights=0,
improvement_tol=0.,
center_return=False,
render_after=None,
max_offline_iters=100,
callback=None,
clipping=False,
entropy='none',
positive_return=False,
reward_clustering='none',
capacity=10,
warm_start=True):
np.set_printoptions(precision=3)
max_samples = horizon * n_episodes
if line_search_type == 'binary':
line_search = line_search_binary
elif line_search_type == 'parabola':
line_search = line_search_parabola
else:
raise ValueError()
# Building the environment
ob_space = env.observation_space
ac_space = env.action_space
# Creating the memory buffer
memory = Memory(capacity=capacity, batch_size=n_episodes, horizon=horizon,
ob_space=ob_space, ac_space=ac_space)
# Building the target policy and saving its parameters
pi = make_policy('pi', ob_space, ac_space)
all_var_list = pi.get_trainable_variables()
var_list = [v for v in all_var_list if v.name.split('/')[1].startswith('pol')]
shapes = [U.intprod(var.get_shape().as_list()) for var in var_list]
n_parameters = sum(shapes)
# Building a set of behavioral policies
behavioral_policies = memory.build_policies(make_policy, pi)
# Placeholders
ob_ = ob = U.get_placeholder_cached(name='ob')
ac_ = pi.pdtype.sample_placeholder([None], name='ac')
mask_ = tf.placeholder(dtype=tf.float32, shape=(None), name='mask')
rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='rew')
disc_rew_ = tf.placeholder(dtype=tf.float32, shape=(None), name='disc_rew')
clustered_rew_ = tf.placeholder(dtype=tf.float32, shape=(None))
gradient_ = tf.placeholder(dtype=tf.float32, shape=(n_parameters, 1), name='gradient')
iter_number_ = tf.placeholder(dtype=tf.int32, name='iter_number')
active_policies = tf.placeholder(dtype=tf.float32, shape=(capacity), name='active_policies')
losses_with_name = []
# Total number of trajectories
N_total = tf.reduce_sum(active_policies) * n_episodes
# Split operations
disc_rew_split = tf.reshape(disc_rew_ * mask_, [-1, horizon])
rew_split = tf.reshape(rew_ * mask_, [-1, horizon])
mask_split = tf.reshape(mask_, [-1, horizon])
# Policy densities
target_log_pdf = pi.pd.logp(ac_) * mask_
target_log_pdf_split = tf.reshape(target_log_pdf, [-1, horizon])
behavioral_log_pdfs = tf.stack([bpi.pd.logp(ac_) * mask_ for bpi in memory.policies]) # Shape is (capacity, ntraj*horizon)
behavioral_log_pdfs_split = tf.reshape(behavioral_log_pdfs, [memory.capacity, -1, horizon])
# Compute renyi divergencies and sum over time, then exponentiate
emp_d2_split = tf.reshape(tf.stack([pi.pd.renyi(bpi.pd, 2) * mask_ for bpi in memory.policies]), [memory.capacity, -1, horizon])
emp_d2_split_cum = tf.exp(tf.reduce_sum(emp_d2_split, axis=2))
# Compute arithmetic and harmonic mean of emp_d2
emp_d2_mean = tf.reduce_mean(emp_d2_split_cum, axis=1)
emp_d2_arithmetic = tf.reduce_sum(emp_d2_mean * active_policies) / tf.reduce_sum(active_policies)
emp_d2_harmonic = tf.reduce_sum(active_policies) / tf.reduce_sum(1 / emp_d2_mean)
# Return processing: clipping, centering, discounting
ep_return = clustered_rew_ #tf.reduce_sum(mask_split * disc_rew_split, axis=1)
if clipping:
rew_split = tf.clip_by_value(rew_split, -1, 1)
if center_return:
ep_return = ep_return - tf.reduce_mean(ep_return)
rew_split = rew_split - (tf.reduce_sum(rew_split) / (tf.reduce_sum(mask_split) + 1e-24))
discounter = [pow(gamma, i) for i in range(0, horizon)] # Decreasing gamma
discounter_tf = tf.constant(discounter)
disc_rew_split = rew_split * discounter_tf
# Reward statistics
return_mean = tf.reduce_mean(ep_return)
return_std = U.reduce_std(ep_return)
return_max = tf.reduce_max(ep_return)
return_min = tf.reduce_min(ep_return)
return_abs_max = tf.reduce_max(tf.abs(ep_return))
return_step_max = tf.reduce_max(tf.abs(rew_split)) # Max step reward
return_step_mean = tf.abs(tf.reduce_mean(rew_split))
positive_step_return_max = tf.maximum(0.0, tf.reduce_max(rew_split))
negative_step_return_max = tf.maximum(0.0, tf.reduce_max(-rew_split))
return_step_maxmin = tf.abs(positive_step_return_max - negative_step_return_max)
losses_with_name.extend([(return_mean, 'InitialReturnMean'),
(return_max, 'InitialReturnMax'),
(return_min, 'InitialReturnMin'),
(return_std, 'InitialReturnStd'),
(emp_d2_arithmetic, 'EmpiricalD2Arithmetic'),
(emp_d2_harmonic, 'EmpiricalD2Harmonic'),
(return_step_max, 'ReturnStepMax'),
(return_step_maxmin, 'ReturnStepMaxmin')])
if iw_method == 'is':
# Sum the log prob over time. Shapes: target(Nep, H), behav (Cap, Nep, H)
target_log_pdf_episode = tf.reduce_sum(target_log_pdf_split, axis=1)
behavioral_log_pdf_episode = tf.reduce_sum(behavioral_log_pdfs_split, axis=2)
# To avoid numerical instability, compute the inversed ratio
log_ratio = target_log_pdf_split - behavioral_log_pdfs_split
inverse_log_ratio_episode = - tf.reduce_sum(log_ratio, axis=2)
iw = 1 / tf.reduce_sum(tf.exp(inverse_log_ratio_episode) * tf.expand_dims(active_policies, -1), axis=0)
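        # Note: this is the balance-heuristic multiple importance sampling weight. The exponential
        # above equals sum_i p_behavioral_i(tau) / p_target(tau) over the active policies, so iw is
        # p_target(tau) / sum_i p_behavioral_i(tau); dividing by n_episodes below (iwn) completes the
        # estimator sum_tau R(tau) * p_target(tau) / sum_i n_i * p_behavioral_i(tau), since each
        # behavioral policy contributed n_episodes trajectories.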
# Compute also the balance-heuristic weights
iw_split = tf.reshape(iw, (memory.capacity, -1))
iw_by_behavioral = tf.reduce_mean(iw_split, axis=1)
losses_with_name.append((iw_by_behavioral[0] / tf.reduce_sum(iw_by_behavioral), 'MultiIWFirstRatio'))
losses_with_name.append((tf.reduce_max(iw_by_behavioral), 'MultiIWMax'))
losses_with_name.append((tf.reduce_sum(iw_by_behavioral), 'MultiIWSum'))
losses_with_name.append((tf.reduce_min(iw_by_behavioral), 'MultiIWMin'))
# Get the probability by exponentiation
#target_pdf_episode = tf.exp(target_log_pdf_episode)
#behavioral_pdf_episode = tf.exp(behavioral_log_pdf_episode)
# Get the denominator by averaging over behavioral policies
#behavioral_pdf_mixture = tf.reduce_mean(behavioral_pdf_episode, axis=0) + 1e-24
#iw = target_pdf_episode / behavioral_pdf_mixture
iwn = iw / n_episodes
# Compute the J
w_return_mean = tf.reduce_sum(ep_return * iwn)
# Empirical D2 of the mixture and relative ESS
ess_renyi_arithmetic = N_total / emp_d2_arithmetic
ess_renyi_harmonic = N_total / emp_d2_harmonic
# Log quantities
losses_with_name.extend([(tf.reduce_max(iw), 'MaxIW'),
(tf.reduce_min(iw), 'MinIW'),
(tf.reduce_mean(iw), 'MeanIW'),
(U.reduce_std(iw), 'StdIW'),
(tf.reduce_min(target_log_pdf_episode), 'MinTargetPdf'),
(tf.reduce_min(behavioral_log_pdf_episode), 'MinBehavPdf'),
(ess_renyi_arithmetic, 'ESSRenyiArithmetic'),
(ess_renyi_harmonic, 'ESSRenyiHarmonic')])
else:
raise NotImplementedError()
if bound == 'J':
bound_ = w_return_mean
elif bound == 'max-d2-harmonic':
bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_harmonic)) * return_abs_max
elif bound == 'max-d2-arithmetic':
bound_ = w_return_mean - tf.sqrt((1 - delta) / (delta * ess_renyi_arithmetic)) * return_abs_max
else:
raise NotImplementedError()
# Policy entropy for exploration
ent = pi.pd.entropy()
meanent = tf.reduce_mean(ent)
losses_with_name.append((meanent, 'MeanEntropy'))
# Add policy entropy bonus
if entropy != 'none':
scheme, v1, v2 = entropy.split(':')
if scheme == 'step':
entcoeff = tf.cond(iter_number_ < int(v2), lambda: float(v1), lambda: float(0.0))
losses_with_name.append((entcoeff, 'EntropyCoefficient'))
entbonus = entcoeff * meanent
bound_ = bound_ + entbonus
elif scheme == 'lin':
ip = tf.cast(iter_number_ / max_iters, tf.float32)
entcoeff_decay = tf.maximum(0.0, float(v2) + (float(v1) - float(v2)) * (1.0 - ip))
losses_with_name.append((entcoeff_decay, 'EntropyCoefficient'))
entbonus = entcoeff_decay * meanent
bound_ = bound_ + entbonus
elif scheme == 'exp':
ent_f = tf.exp(-tf.abs(tf.reduce_mean(iw) - 1) * float(v2)) * float(v1)
losses_with_name.append((ent_f, 'EntropyCoefficient'))
bound_ = bound_ + ent_f * meanent
else:
raise Exception('Unrecognized entropy scheme.')
losses_with_name.append((w_return_mean, 'ReturnMeanIW'))
losses_with_name.append((bound_, 'Bound'))
losses, loss_names = map(list, zip(*losses_with_name))
'''
if use_natural_gradient:
p = tf.placeholder(dtype=tf.float32, shape=[None])
target_logpdf_episode = tf.reduce_sum(target_log_pdf_split * mask_split, axis=1)
grad_logprob = U.flatgrad(tf.stop_gradient(iwn) * target_logpdf_episode, var_list)
dot_product = tf.reduce_sum(grad_logprob * p)
hess_logprob = U.flatgrad(dot_product, var_list)
compute_linear_operator = U.function([p, ob_, ac_, disc_rew_, mask_], [-hess_logprob])
'''
assert_ops = tf.group(*tf.get_collection('asserts'))
print_ops = tf.group(*tf.get_collection('prints'))
compute_lossandgrad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses + [U.flatgrad(bound_, var_list), assert_ops, print_ops])
compute_grad = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [U.flatgrad(bound_, var_list), assert_ops, print_ops])
compute_bound = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [bound_, assert_ops, print_ops])
compute_losses = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], losses)
#compute_temp = U.function([ob_, ac_, rew_, disc_rew_, clustered_rew_, mask_, iter_number_, active_policies], [log_inverse_ratio, abc, iw])
set_parameter = U.SetFromFlat(var_list)
get_parameter = U.GetFlat(var_list)
policy_reinit = tf.variables_initializer(var_list)
if sampler is None:
seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=gamma)
sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()
U.initialize()
# Starting optimizing
episodes_so_far = 0
timesteps_so_far = 0
iters_so_far = 0
tstart = time.time()
lenbuffer = deque(maxlen=n_episodes)
rewbuffer = deque(maxlen=n_episodes)
while True:
iters_so_far += 1
if iters_so_far == 50:
print('=== CHANGED GAMMA TO 1.0')
seg_gen = traj_segment_generator(pi, env, n_episodes, horizon, stochastic=True, gamma=1.0)
sampler = type("SequentialSampler", (object,), {"collect": lambda self, _: seg_gen.__next__()})()
if render_after is not None and iters_so_far % render_after == 0:
if hasattr(env, 'render'):
render(env, pi, horizon)
if callback:
callback(locals(), globals())
if iters_so_far >= max_iters:
print('Finished...')
break
logger.log('********** Iteration %i ************' % iters_so_far)
theta = get_parameter()
with timed('sampling'):
seg = sampler.collect(theta)
lens, rets = seg['ep_lens'], seg['ep_rets']
lenbuffer.extend(lens)
rewbuffer.extend(rets)
episodes_so_far += len(lens)
timesteps_so_far += sum(lens)
# Adding batch of trajectories to memory
memory.add_trajectory_batch(seg)
# Get multiple batches from memory
seg_with_memory = memory.get_trajectories()
# Get clustered reward
reward_matrix = np.reshape(seg_with_memory['disc_rew'] * seg_with_memory['mask'], (-1, horizon))
ep_reward = np.sum(reward_matrix, axis=1)
ep_reward = cluster_rewards(ep_reward, reward_clustering)
args = ob, ac, rew, disc_rew, clustered_rew, mask, iter_number, active_policies = (seg_with_memory['ob'],
seg_with_memory['ac'],
seg_with_memory['rew'],
seg_with_memory['disc_rew'],
ep_reward,
seg_with_memory['mask'],
iters_so_far,
memory.get_active_policies_mask())
def evaluate_loss():
loss = compute_bound(*args)
return loss[0]
def evaluate_gradient():
gradient = compute_grad(*args)
return gradient[0]
if use_natural_gradient:
def evaluate_fisher_vector_prod(x):
return compute_linear_operator(x, *args)[0] + fisher_reg * x
def evaluate_natural_gradient(g):
return cg(evaluate_fisher_vector_prod, g, cg_iters=10, verbose=0)
else:
evaluate_natural_gradient = None
with timed('summaries before'):
logger.record_tabular("Iteration", iters_so_far)
logger.record_tabular("InitialBound", evaluate_loss())
logger.record_tabular("EpLenMean", np.mean(lenbuffer))
logger.record_tabular("EpRewMean", np.mean(rewbuffer))
logger.record_tabular("EpThisIter", len(lens))
logger.record_tabular("EpisodesSoFar", episodes_so_far)
logger.record_tabular("TimestepsSoFar", timesteps_so_far)
logger.record_tabular("TimeElapsed", time.time() - tstart)
if save_weights > 0 and iters_so_far % save_weights == 0:
logger.record_tabular('Weights', str(get_parameter()))
import pickle
file = open('checkpoint' + str(iters_so_far) + '.pkl', 'wb')
pickle.dump(theta, file)
if not warm_start or memory.get_current_load() == capacity:
# Optimize
with timed("offline optimization"):
theta, improvement = optimize_offline(theta,
set_parameter,
line_search,
evaluate_loss,
evaluate_gradient,
evaluate_natural_gradient,
max_offline_ite=max_offline_iters)
set_parameter(theta)
print(theta)
with timed('summaries after'):
meanlosses = np.array(compute_losses(*args))
for (lossname, lossval) in zip(loss_names, meanlosses):
logger.record_tabular(lossname, lossval)
else:
# Reinitialize the policy
tf.get_default_session().run(policy_reinit)
logger.dump_tabular()
env.close()
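# Illustrative sketch, not used by the graph construction above: a small NumPy
# re-implementation of the multiple importance-sampling weights computed in the
# 'is' branch (time-summed log-densities, weights equal to the target density
# over the summed densities of the active behavioral policies). Shapes follow
# the comments above: target_log_pdf is (Nep,), behavioral_log_pdfs is
# (Cap, Nep), active_mask is (Cap,).
def _demo_multiple_is_weights(target_log_pdf, behavioral_log_pdfs, active_mask):
    import numpy as np
    # exp(behavioral - target) per (policy, episode), i.e. the inverse ratios
    inverse_ratio = np.exp(behavioral_log_pdfs - target_log_pdf[None, :])
    # keep only the active behavioral policies and sum them per episode
    denominator = np.sum(inverse_ratio * active_mask[:, None], axis=0)
    return 1.0 / denominator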
| 41.597378 | 183 | 0.620267 |
f71d84bd199bff2d65b7447164541e907a3f533b | 3,063 | py | Python | weasyl/blocktag.py | Weasyl/weasyl | 80c86942c6f20a815086e2895fdad51d3aa77eed | [
"Apache-2.0"
] | 111 | 2016-05-18T04:18:18.000Z | 2021-11-03T02:05:19.000Z | weasyl/blocktag.py | Weasyl/weasyl | 80c86942c6f20a815086e2895fdad51d3aa77eed | [
"Apache-2.0"
] | 1,103 | 2016-05-29T05:17:53.000Z | 2022-03-31T18:12:40.000Z | weasyl/blocktag.py | Weasyl/weasyl | 80c86942c6f20a815086e2895fdad51d3aa77eed | [
"Apache-2.0"
] | 47 | 2016-05-29T20:48:37.000Z | 2021-11-12T09:40:40.000Z | from libweasyl import ratings
from libweasyl.cache import region
from weasyl import define as d
from weasyl import profile
from weasyl import searchtag
# For blocked tags, `rating` refers to the lowest rating for which that tag is
# blocked; for example, (X, Y, 10) would block tag Y for all ratings, whereas
# (X, Y, 30) would block tag Y for only adult ratings.
def check(userid, submitid=None, charid=None, journalid=None):
"""
Returns True if the submission, character, or journal contains a search tag
that the user has blocked, else False.
"""
if not userid:
return False
if submitid:
map_table = "searchmapsubmit"
content_table = "submission"
id_field = "submitid"
target = submitid
elif charid:
map_table = "searchmapchar"
content_table = "character"
id_field = "charid"
target = charid
else:
map_table = "searchmapjournal"
content_table = "journal"
id_field = "journalid"
target = journalid
query = """
SELECT EXISTS (
SELECT 0 FROM {map_table} searchmap
INNER JOIN {content_table} content ON searchmap.targetid = content.{id_field}
WHERE searchmap.targetid = %(id)s
AND content.userid != %(user)s
AND searchmap.tagid IN (
SELECT blocktag.tagid FROM blocktag
WHERE userid = %(user)s AND blocktag.rating <= content.rating)) AS block
""".format(map_table=map_table, content_table=content_table, id_field=id_field)
return d.engine.execute(query, id=target, user=userid).first().block
def check_list(rating, tags, blocked_tags):
return any(rating >= b['rating'] and b['tagid'] in tags for b in blocked_tags)
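# Illustrative note (not part of the original module), with hypothetical ids:
#   check_list(30, {5, 9}, [{'tagid': 9, 'rating': 10}])  # True: tag 9 is blocked from rating 10 up
#   check_list(10, {5, 9}, [{'tagid': 9, 'rating': 30}])  # False: tag 9 is only blocked for rating 30+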
def select(userid):
return [{
"title": i[0],
"rating": i[1],
} for i in d.execute("SELECT st.title, bt.rating FROM searchtag st "
" INNER JOIN blocktag bt ON st.tagid = bt.tagid"
" WHERE bt.userid = %i"
" ORDER BY st.title", [userid])]
@region.cache_on_arguments()
@d.record_timing
def select_ids(userid):
return [
dict(row)
for row in d.engine.execute(
'SELECT tagid, rating FROM blocktag WHERE userid = %(user)s',
user=userid)
]
def insert(userid, title, rating):
if rating not in ratings.CODE_MAP:
rating = ratings.GENERAL.code
profile.check_user_rating_allowed(userid, rating)
d.engine.execute(
'INSERT INTO blocktag (userid, tagid, rating) VALUES (%(user)s, %(tag)s, %(rating)s) ON CONFLICT DO NOTHING',
user=userid, tag=searchtag.get_or_create(title), rating=rating)
select_ids.invalidate(userid)
def remove(userid, title):
d.engine.execute(
"DELETE FROM blocktag WHERE (userid, tagid) = (%(user)s, (SELECT tagid FROM searchtag WHERE title = %(tag)s))",
user=userid,
tag=d.get_search_tag(title),
)
select_ids.invalidate(userid)
| 31.57732 | 119 | 0.627163 |
f71d86f566cba857829800b557598795af8fd8ab | 1,145 | py | Python | irc_hooky/base_object.py | byorgey/irc-hooky | e78942b7e13ce273c40815863d0384dddfa52243 | [
"MIT"
] | 19 | 2016-01-26T18:36:38.000Z | 2022-03-12T02:32:01.000Z | irc_hooky/base_object.py | byorgey/irc-hooky | e78942b7e13ce273c40815863d0384dddfa52243 | [
"MIT"
] | 3 | 2016-01-29T19:43:25.000Z | 2019-03-11T20:21:11.000Z | irc_hooky/base_object.py | byorgey/irc-hooky | e78942b7e13ce273c40815863d0384dddfa52243 | [
"MIT"
] | 2 | 2016-03-01T09:23:07.000Z | 2020-04-01T21:53:51.000Z | from abc import ABCMeta
import logging
import json
class BaseObject(object):
__metaclass__ = ABCMeta
def __init__(self, **kwargs):
self.log = logging.getLogger("irchooky")
for prop in self.properties:
setattr(self, prop, kwargs.get(prop, ""))
def load(self, object_dict):
if not object_dict:
return
for prop in self.properties:
default = getattr(self, prop)
setattr(self, prop, object_dict.get(prop, default))
def __str__(self):
return_dict = {}
for prop in self.properties:
return_dict.update({prop: str(getattr(self, prop))})
return json.dumps(return_dict)
def __eq__(self, other):
for prop in self.properties:
if not getattr(self, prop) == getattr(other, prop):
self.log.debug("Property %s is different" % prop)
self.log.debug("%s != %s" % (getattr(self, prop),
getattr(other, prop)))
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
| 29.358974 | 67 | 0.567686 |
f71d9d0a976dd2d6fab7c86d11a2c85c733baff5 | 11,274 | py | Python | tfx/orchestration/kubeflow/executor_wrappers.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | 2 | 2019-07-08T20:56:13.000Z | 2020-08-04T17:07:26.000Z | tfx/orchestration/kubeflow/executor_wrappers.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | tfx/orchestration/kubeflow/executor_wrappers.py | HassanDayoub/tfx | dc9221abbb8dad991d1ae22fb91876da1290efae | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for TFX executors running as part of a Kubeflow pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import argparse
import json
import os
import re
from future import utils
import six
import tensorflow as tf
from typing import Any, Dict, List, Text
from tensorflow.python.lib.io import file_io # pylint: disable=g-direct-tensorflow-import
from tfx import version
from tfx.components.base import base_executor
from tfx.utils import import_utils
from tfx.utils import types
def parse_tfx_type(json_str: Text):
"""Parses a list of artifacts and their types from json."""
json_artifact_list = json.loads(json_str)
tfx_types = []
for json_artifact in json_artifact_list:
tfx_type = types.TfxArtifact.parse_from_json_dict(json_artifact)
tfx_types.append(tfx_type)
return tfx_types
def to_snake_case(name: Text):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
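# Illustrative note (not part of the original source): to_snake_case('CSVExampleGen')
# yields 'csv_example_gen', which is the component directory name the wrappers
# below use when assembling output artifact URIs.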
class KubeflowExecutorWrapper(utils.with_metaclass(abc.ABCMeta), object):
"""Abstract base class for all Kubeflow Pipelines-based TFX components."""
def __init__(
self,
executor_class_path: Text,
name: Text,
input_dict: Dict[Text, List[types.TfxArtifact]],
outputs: Text,
exec_properties: Dict[Text, Any],
):
self._input_dict = input_dict
self._output_dict = types.parse_tfx_type_dict(outputs)
self._component_name = to_snake_case(name)
self._exec_properties = exec_properties
self._output_dir = self._exec_properties['output_dir']
self._workflow_id = os.environ['WORKFLOW_ID']
raw_args = self._exec_properties.get('beam_pipeline_args', [])
# Beam expects str types for it's pipeline args. Ensure unicode type is
# converted to str if required.
beam_pipeline_args = []
for arg in raw_args:
# In order to support both Py2 and Py3: Py3 doesn't have `unicode` type.
if six.PY2 and isinstance(arg, unicode):
arg = arg.encode('ascii', 'ignore')
beam_pipeline_args.append(arg)
# TODO(zhitaoli): Revisit usage of setup_file here.
module_dir = os.path.dirname(os.path.dirname(version.__file__))
setup_file = os.path.join(module_dir, 'setup.py')
tf.logging.info('Using setup_file \'%s\' to capture TFX dependencies',
setup_file)
beam_pipeline_args.append('--setup_file={}'.format(setup_file))
executor_cls = import_utils.import_class_by_path(executor_class_path)
# TODO(swoonna): Switch to execution_id when available
unique_id = '{}_{}'.format(self._component_name, self._workflow_id)
# TODO(swoonna): Add tmp_dir to additional_pipeline_args
executor_context = base_executor.BaseExecutor.Context(
beam_pipeline_args=beam_pipeline_args,
tmp_dir=os.path.join(self._output_dir, '.temp', ''),
unique_id=unique_id)
self._executor = executor_cls(executor_context)
def _set_outputs(self):
tf.logging.info('Using workflow id {}'.format(self._workflow_id))
max_input_span = 0
for input_list in self._input_dict.values():
for single_input in input_list:
max_input_span = max(max_input_span, single_input.span)
for output_name, output_artifact_list in self._output_dict.items():
for output_artifact in output_artifact_list:
output_artifact.uri = os.path.join(self._output_dir,
self._component_name, output_name,
self._workflow_id,
output_artifact.split, '')
output_artifact.span = max_input_span
def run(self, output_basedir: Text = '/'):
"""Runs the wrapped Executor, and writes metadata of output artifacts.
Args:
output_basedir: Base directory to which output artifacts metadata
is written. Useful for unit tests.
"""
self._executor.Do(self._input_dict, self._output_dict,
self._exec_properties)
output_dir = os.path.join(output_basedir, 'output/ml_metadata')
tf.gfile.MakeDirs(output_dir)
for output_name, output_artifact_list in self._output_dict.items():
filename = os.path.join(output_dir, output_name)
with file_io.FileIO(filename, 'w') as f:
output_list = [x.json_dict() for x in output_artifact_list]
f.write(json.dumps(output_list))
# TODO(b/132197968): Get rid of all the individual wrapper classes below and
# combine them into a single generic one that constructs the input dict from
# the individual named arguments instead. In the future, the generic wrapper
# can call into TFX drivers to handle component-specific logic as well.
class CsvExampleGenWrapper(KubeflowExecutorWrapper):
"""Wrapper for CSVExampleGen component."""
def __init__(self, args: argparse.Namespace):
super(CsvExampleGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='CSVExampleGen',
input_dict={
'input-base': parse_tfx_type(args.input_base),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_input_artifact_span()
self._set_outputs()
def _set_input_artifact_span(self):
for input_artifact in self._input_dict['input-base']:
matched = re.match(r'span_([0-9]+)', input_artifact.uri)
span = matched.group(1) if matched else 1
input_artifact.span = span
class BigQueryExampleGenWrapper(KubeflowExecutorWrapper):
"""Wrapper for BigQueryExampleGen component."""
def __init__(self, args: argparse.Namespace):
super(BigQueryExampleGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='BigQueryExampleGen',
input_dict={},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class StatisticsGenWrapper(KubeflowExecutorWrapper):
"""Wrapper for StatisticsGen component."""
def __init__(self, args: argparse.Namespace):
super(StatisticsGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='StatisticsGen',
input_dict={
'input_data': parse_tfx_type(args.input_data),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class SchemaGenWrapper(KubeflowExecutorWrapper):
"""Wrapper for SchemaGen component."""
def __init__(self, args: argparse.Namespace):
super(SchemaGenWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='SchemaGen',
input_dict={
'stats': parse_tfx_type(args.stats),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class ExampleValidatorWrapper(KubeflowExecutorWrapper):
"""Wrapper for ExampleValidator component."""
def __init__(self, args: argparse.Namespace):
super(ExampleValidatorWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='ExampleValidator',
input_dict={
'stats': parse_tfx_type(args.stats),
'schema': parse_tfx_type(args.schema),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class TransformWrapper(KubeflowExecutorWrapper):
"""Wrapper for Transform component."""
def __init__(self, args: argparse.Namespace):
super(TransformWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Transform',
input_dict={
'input_data': parse_tfx_type(args.input_data),
'schema': parse_tfx_type(args.schema),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class TrainerWrapper(KubeflowExecutorWrapper):
"""Wrapper for Trainer component."""
def __init__(self, args: argparse.Namespace):
super(TrainerWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Trainer',
input_dict={
'transformed_examples': parse_tfx_type(args.transformed_examples),
'transform_output': parse_tfx_type(args.transform_output),
'schema': parse_tfx_type(args.schema),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
# TODO(ajaygopinathan): Implement warm starting.
self._exec_properties['warm_starting'] = False
self._exec_properties['warm_start_from'] = None
class EvaluatorWrapper(KubeflowExecutorWrapper):
"""Wrapper for Evaluator component."""
def __init__(self, args: argparse.Namespace):
super(EvaluatorWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Evaluator',
input_dict={
'examples': parse_tfx_type(args.examples),
'model_exports': parse_tfx_type(args.model_exports),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
class ModelValidatorWrapper(KubeflowExecutorWrapper):
"""Wrapper for ModelValidator component."""
def __init__(self, args: argparse.Namespace):
super(ModelValidatorWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='ModelValidator',
input_dict={
'examples': parse_tfx_type(args.examples),
'model': parse_tfx_type(args.model),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
# TODO(ajaygopinathan): Implement latest blessed model determination.
self._exec_properties['latest_blessed_model'] = None
self._exec_properties['latest_blessed_model_id'] = None
class PusherWrapper(KubeflowExecutorWrapper):
"""Wrapper for Pusher component."""
def __init__(self, args: argparse.Namespace):
super(PusherWrapper, self).__init__(
executor_class_path=args.executor_class_path,
name='Pusher',
input_dict={
'model_export': parse_tfx_type(args.model_export),
'model_blessing': parse_tfx_type(args.model_blessing),
},
outputs=args.outputs,
exec_properties=json.loads(args.exec_properties),
)
self._set_outputs()
# TODO(ajaygopinathan): Implement latest pushed model
self._exec_properties['latest_pushed_model'] = None
| 35.23125 | 90 | 0.699219 |
f71dbea28c6bb0f66e8170b73a2d179586fc3668 | 8,203 | py | Python | sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-09T08:59:13.000Z | 2022-03-09T08:59:13.000Z | sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/azure/identity/aio/_credentials/default.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import os
from typing import TYPE_CHECKING
from ..._constants import EnvironmentVariables
from ..._internal import get_default_authority, normalize_authority
from .azure_cli import AzureCliCredential
from .azure_powershell import AzurePowerShellCredential
from .chained import ChainedTokenCredential
from .environment import EnvironmentCredential
from .managed_identity import ManagedIdentityCredential
from .shared_cache import SharedTokenCacheCredential
from .vscode import VisualStudioCodeCredential
if TYPE_CHECKING:
from typing import Any, List
from azure.core.credentials import AccessToken
from azure.core.credentials_async import AsyncTokenCredential
_LOGGER = logging.getLogger(__name__)
class DefaultAzureCredential(ChainedTokenCredential):
"""A default credential capable of handling most Azure SDK authentication scenarios.
The identity it uses depends on the environment. When an access token is needed, it requests one using these
identities in turn, stopping when one provides a token:
1. A service principal configured by environment variables. See :class:`~azure.identity.aio.EnvironmentCredential`
for more details.
2. An Azure managed identity. See :class:`~azure.identity.aio.ManagedIdentityCredential` for more details.
3. On Windows only: a user who has signed in with a Microsoft application, such as Visual Studio. If multiple
identities are in the cache, then the value of the environment variable ``AZURE_USERNAME`` is used to select
which identity to use. See :class:`~azure.identity.aio.SharedTokenCacheCredential` for more details.
4. The user currently signed in to Visual Studio Code.
5. The identity currently logged in to the Azure CLI.
6. The identity currently logged in to Azure PowerShell.
This default behavior is configurable with keyword arguments.
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds. Managed identities ignore this because they reside in a single cloud.
:keyword bool exclude_cli_credential: Whether to exclude the Azure CLI from the credential. Defaults to **False**.
:keyword bool exclude_environment_credential: Whether to exclude a service principal configured by environment
variables from the credential. Defaults to **False**.
:keyword bool exclude_powershell_credential: Whether to exclude Azure PowerShell. Defaults to **False**.
:keyword bool exclude_visual_studio_code_credential: Whether to exclude stored credential from VS Code.
Defaults to **False**.
:keyword bool exclude_managed_identity_credential: Whether to exclude managed identity from the credential.
Defaults to **False**.
:keyword bool exclude_shared_token_cache_credential: Whether to exclude the shared token cache. Defaults to
**False**.
:keyword str managed_identity_client_id: The client ID of a user-assigned managed identity. Defaults to the value
of the environment variable AZURE_CLIENT_ID, if any. If not specified, a system-assigned identity will be used.
:keyword str shared_cache_username: Preferred username for :class:`~azure.identity.aio.SharedTokenCacheCredential`.
Defaults to the value of environment variable AZURE_USERNAME, if any.
:keyword str shared_cache_tenant_id: Preferred tenant for :class:`~azure.identity.aio.SharedTokenCacheCredential`.
Defaults to the value of environment variable AZURE_TENANT_ID, if any.
:keyword str visual_studio_code_tenant_id: Tenant ID to use when authenticating with
:class:`~azure.identity.aio.VisualStudioCodeCredential`. Defaults to the "Azure: Tenant" setting in VS Code's
user settings or, when that setting has no value, the "organizations" tenant, which supports only Azure Active
Directory work or school accounts.
"""
def __init__(self, **kwargs: "Any") -> None:
if "tenant_id" in kwargs:
raise TypeError("'tenant_id' is not supported in DefaultAzureCredential.")
authority = kwargs.pop("authority", None)
vscode_tenant_id = kwargs.pop(
"visual_studio_code_tenant_id", os.environ.get(EnvironmentVariables.AZURE_TENANT_ID)
)
vscode_args = dict(kwargs)
if authority:
vscode_args["authority"] = authority
if vscode_tenant_id:
vscode_args["tenant_id"] = vscode_tenant_id
authority = normalize_authority(authority) if authority else get_default_authority()
shared_cache_username = kwargs.pop("shared_cache_username", os.environ.get(EnvironmentVariables.AZURE_USERNAME))
shared_cache_tenant_id = kwargs.pop(
"shared_cache_tenant_id", os.environ.get(EnvironmentVariables.AZURE_TENANT_ID)
)
managed_identity_client_id = kwargs.pop(
"managed_identity_client_id", os.environ.get(EnvironmentVariables.AZURE_CLIENT_ID)
)
exclude_visual_studio_code_credential = kwargs.pop("exclude_visual_studio_code_credential", False)
exclude_cli_credential = kwargs.pop("exclude_cli_credential", False)
exclude_environment_credential = kwargs.pop("exclude_environment_credential", False)
exclude_managed_identity_credential = kwargs.pop("exclude_managed_identity_credential", False)
exclude_shared_token_cache_credential = kwargs.pop("exclude_shared_token_cache_credential", False)
exclude_powershell_credential = kwargs.pop("exclude_powershell_credential", False)
credentials = [] # type: List[AsyncTokenCredential]
if not exclude_environment_credential:
credentials.append(EnvironmentCredential(authority=authority, **kwargs))
if not exclude_managed_identity_credential:
credentials.append(ManagedIdentityCredential(client_id=managed_identity_client_id, **kwargs))
if not exclude_shared_token_cache_credential and SharedTokenCacheCredential.supported():
try:
# username and/or tenant_id are only required when the cache contains tokens for multiple identities
shared_cache = SharedTokenCacheCredential(
username=shared_cache_username, tenant_id=shared_cache_tenant_id, authority=authority, **kwargs
)
credentials.append(shared_cache)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.info("Shared token cache is unavailable: '%s'", ex)
if not exclude_visual_studio_code_credential:
credentials.append(VisualStudioCodeCredential(**vscode_args))
if not exclude_cli_credential:
credentials.append(AzureCliCredential())
if not exclude_powershell_credential:
credentials.append(AzurePowerShellCredential())
super().__init__(*credentials)
async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
"""Asynchronously request an access token for `scopes`.
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The exception has a
`message` attribute listing each authentication attempt and its error message.
"""
if self._successful_credential:
return await self._successful_credential.get_token(*scopes, **kwargs)
return await super().get_token(*scopes, **kwargs)
| 55.802721 | 120 | 0.737413 |
f71dfbaeac1fbc882ae16f2bdf6a603281da54bd | 498 | py | Python | Florence/Base/FlorenceExceptions.py | jdlaubrie/florence | 830dca4a34be00d6e53cbec3007c10d438b27f57 | [
"MIT"
] | 65 | 2017-08-04T10:21:13.000Z | 2022-02-21T21:45:09.000Z | Florence/Base/FlorenceExceptions.py | jdlaubrie/florence | 830dca4a34be00d6e53cbec3007c10d438b27f57 | [
"MIT"
] | 6 | 2018-06-03T02:29:20.000Z | 2022-01-18T02:30:22.000Z | Florence/Base/FlorenceExceptions.py | jdlaubrie/florence | 830dca4a34be00d6e53cbec3007c10d438b27f57 | [
"MIT"
] | 10 | 2018-05-30T09:44:10.000Z | 2021-05-18T08:06:51.000Z |
class JacobianError(ArithmeticError):
def __init__(self,value=None):
self.value = value
def __str__(self):
if self.value is None:
self.value = 'Jacobian of mapping is close to zero'
return repr(self.value)
class IllConditionedError(ArithmeticError):
def __init__(self,value=None):
self.value = value
def __str__(self):
if self.value is None:
self.value = 'Matrix is ill conditioned'
return repr(self.value) | 29.294118 | 63 | 0.638554 |
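# Illustrative note (not part of the original module): both exceptions take an
# optional message and otherwise fall back to their built-in text, e.g.
#   raise JacobianError()                       # "Jacobian of mapping is close to zero"
#   raise IllConditionedError("K is singular")  # custom message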
f71e08a94e9f047dfa54cd7ce41c674a5cd1f8bd | 15,999 | py | Python | game.py | theDrinkMD/twibbage | c0aba60bd2df50f0a5688db4a01048ea1efd1a45 | [
"MIT"
] | null | null | null | game.py | theDrinkMD/twibbage | c0aba60bd2df50f0a5688db4a01048ea1efd1a45 | [
"MIT"
] | null | null | null | game.py | theDrinkMD/twibbage | c0aba60bd2df50f0a5688db4a01048ea1efd1a45 | [
"MIT"
] | null | null | null | from flask import Flask, request
from twilio.twiml.messaging_response import MessagingResponse
from gameIdGenerator import createNewGameId
from models import Game, Player, Player_Answers, Question
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import dbManager
import logging
import gameManager
import messageSender
import os
from os.path import join, dirname
from dotenv import load_dotenv
app = Flask(__name__)
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
PRODUCTION_DATABASE_URL = os.environ.get("PRODUCTION_DATABASE_URL")
app.config['SQLALCHEMY_DATABASE_URI'] = PRODUCTION_DATABASE_URL
#app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://localhost/twibbage_db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
@app.route("/", methods=['GET', 'POST'])
def twibbage_game():
#INITIALIZE
from_number = request.values.get('From', None)
msg_body = request.values.get('Body', None)
lcase_msg_body = ''
if from_number is not None and msg_body is not None:
lcase_msg_body = unicode.encode(msg_body.lower())
lcase_msg_body = lcase_msg_body.strip()
dotenv_path = join(dirname(__file__), '.env')
load_dotenv(dotenv_path)
ACCOUNT_SID = os.environ.get("TWILIO_ACCOUNT_SID")
AUTH_TOKEN = os.environ.get("TWILIO_AUTH_TOKEN")
#Gamestate Variables for testing
already_in_game = True
current_question_id = 1
max_questions = 4
max_players = 2
game_state = "fakeanswertime"
response_string = ""
points_for_correct_guess = 200
points_for_fakeout = 100
someone_just_jerked_around = False
resp = MessagingResponse()
#resp.message("something is wrong")
if lcase_msg_body.startswith("newgame"):
#checkGameState(from_number) or checkInGame()
# Check if from number is already in a game.
if dbManager.checkIfMdnInGame(from_number) == 2:
response_string = "You're already in a game. To exit that game, respond with \"exitgame\""
else:
#lets parse the message and get the max_players and max questions
game_settings = msg_body.split()
player_alias = str(from_number)
try:
max_players = int(game_settings[1])
print("{} requested a max number of players of {}".format(from_number, max_players))
except IndexError:
max_players = 3
print("{} Did not request a maximum number of questions, defaulting to {}".format(from_number, max_players))
try:
max_questions = int(game_settings[2])
print("{} requested a max number of questions of {}".format(from_number, max_questions))
except IndexError:
max_questions = 3
print("{} Did not request a maximum number of questions, defaulting to {}".format(from_number, max_questions))
try:
player_alias = game_settings[3]
print("{} requested a new name of {}".format(from_number, player_alias))
except IndexError:
player_alias = str(from_number)
print("{} Did not request an alias... defaulting to {}".format(from_number, from_number))
#max_questions = msg_body[9:9]
#max_players = msg_body[11:]
#createGame(from_number, num_questions)
new_game = gameManager.createGame(from_number, max_questions, max_players, player_alias)
# creates a new game object, returns
#gameId = "A1B2"
response_string = "\r Starting a new game... \r - Game ID: {} \r - {} Questions \r - {} Players. " \
"\r Tell your so-called friends to text {} to this number to join. Text rules for... rules.".format(new_game, max_questions, max_players, new_game)
#send rules to host.
#gameManager.sendRules(from_number)
elif lcase_msg_body.startswith("exitgame"):
print("********** {} requested to exit the game. Removing player from game.".format(from_number))
#call exitGame(from_number) which should remove the person from a game
player_id = gameManager.removePlayerFromGame(from_number)
#now lets double check to make sure that this person Wasn't the game host.
#dbManager.updateGameState()
if player_id != 0:
#Check to see if the player ID is a host of an active game
if dbManager.isActiveHost(player_id):
print("********** {} was game host. Fully ending the game.".format(from_number))
ended_game = gameManager.endGameByPlayer(player_id)
response_string = "You have been removed. You were host and ended game too."
else:
response_string = "You have been removed from your current game. Bye!"
else:
response_string = "You asked to be removed, but we couldn't find you!"
elif (lcase_msg_body.startswith("rules") or lcase_msg_body.startswith("info")):
#send rules to host.
gameManager.sendRules(from_number)
else:
# So it's not a new game, which means this can be one of 4 things
#1. First we should check to see if the person is in a game
usr_status = dbManager.checkIfMdnInGame(from_number)
#if the user is either not found, or found, but not in a game,
#lets see what they've written
if usr_status == 0 or usr_status ==1:
#we assume the person is joining a game, so lets get the first 5 bytes
game_settings = lcase_msg_body.split()
try:
player_alias = game_settings[1]
print("{} requested a max number of players of {}".format(from_number, max_players))
except IndexError:
#if we're here that means they only entered 1 thing, the game ID
player_alias = from_number
print("{} Did not request a maximum number of questions, defaulting to {}".format(from_number, max_players))
response_string = gameManager.handleGameJoin(lcase_msg_body[:5].upper(),usr_status,from_number,player_alias)
#gameManager.sendRules(from_number)
#ITS AT THIS POINT WELL WANT TO CHECK TO SEE HOW MANY PLAYERS ARE NOW IN ONCE IVE Joined
my_game = dbManager.getActiveGameByPlayerNumber(from_number)
max_players = my_game.max_players
my_game_token = my_game.game_id
my_game_id = my_game.id
player_diff = max_players - dbManager.getPlayerCount(my_game_token)
if player_diff == 0 :
#holy shit it is timeee
resp.message(response_string)
response_string = "OHHH YEAH We're READY TO START THE GAME"
gameManager.startGame(my_game_id)
#if we've joined, and we're now the last player, then lets start the game
else:
#lets get this person's game object.
my_game = dbManager.getActiveGameByPlayerNumber(from_number)
max_players = my_game.max_players
my_game_token = my_game.game_id
my_player = dbManager.getPlayerByMdn(from_number)
#if we're here, then there are 3 possibilities for game state
#1. In The Lobby
if my_game.game_state == "lobby" :
# Still waiitng for pepole to join something = 1
player_diff = max_players - dbManager.getPlayerCount(my_game_token)
response_string = "\r Still waiting for {} player(s). Text rules for... rules".format(player_diff)
# Store off their fake answer in a DB with Question #, Game ID, from_number, realAnswer ==false
elif my_game.game_state == "fakeanswers":
#if it is fake answer time, we should be expecting questions here. So we'll want to store off people's answers
# 0. First lets make sure that I haven't already answered this question
print("Player About to Answer - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
if dbManager.checkIfPlayerAlreadyAnswered(my_game.id, my_game.current_question_sequence_number,my_player.id):
print("Player Already Answered - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
response_string = "You already answered!"
else:
#print("Not Yet Answered - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
#Check if person faked the right answer like a jerkface
if gameManager.fakeAnswerIsRealAnswer(my_game.current_question_id, lcase_msg_body):
response_string = "Well done hotshot... You selected the correct answer. Please reply with a FAKE answer..."
print("{} tried faking the correct answer...".format(from_number))
else:
print("")
# 1. Store off fake answer
dbManager.addPlayerAnswer(my_game.id, my_game.current_question_sequence_number,my_player.id,lcase_msg_body)
response_string = ""
messageSender.sendMessage(from_number, "Thanks for your fake answer! Waiting for other Players to enter theirs...")
#2. Check if I'm the last to answer
answer_count = dbManager.checkNumberPlayerAnswers(my_game.id,my_game.current_question_sequence_number)
player_count = dbManager.getPlayerCount(my_game_token)
answers_missing = player_count - answer_count
print("answers missing: " + str(answers_missing))
# If I'm last to answer,
if answers_missing == 0:
gameManager.moveToGuessTime(my_game.id)
elif my_game.game_state == "guesstime" :
#Get a person's Guess and store a person's guess
player_guess = lcase_msg_body
#check if the person already answered
if dbManager.checkIfPlayerAlreadyGuessed(my_game.id, my_game.current_question_sequence_number,my_player.id):
print("Player Already Guessed - My current q seq: {}".format(str(my_game.current_question_sequence_number)))
response_string = "\r You already Guessed!"
else:
#So this person hasn't submitted a valid guess yet...
                    #0. Lets get my current player answer
                    my_player_answer = dbManager.getPlayerAnswer(my_game.id, my_game.current_question_sequence_number,my_player.id)
                    #If no, give the person whose response was selected a point
guessed_player_answer = dbManager.getPlayerAnswerByGuessId(my_game.id, my_game.current_question_sequence_number, player_guess[:1])
#is this person being an ass?
if lcase_msg_body == my_player_answer.fake_answer_guess_id:
response_string = "Come on now, you can't guess your own answer. Please sumbit another answer."
#is this an invalid answer?
elif lcase_msg_body.isdigit() == False:
response_string = "You just need to enter the NUMBER of the guess you wish to make. Try again. Like 1, or maybe 2!"
else:
#1. Finally... we can Store off guess
dbManager.updatePlayerAnswerGuess(my_player_answer.id, player_guess)
#Is this person's guess the right answer?
if dbManager.checkIfGuessRightAnswer(my_game.current_question_id, player_guess):
dbManager.updatePlayerScore(my_player.id, points_for_correct_guess)
messageSender.sendMessage(from_number, "\r Yay you got it correct! +{} points!".format(str(points_for_correct_guess)))
#Is this not even a valid response number?
elif guessed_player_answer is None:
#well shit, we already allowed him to save off his shit. we should undo thats
dbManager.updatePlayerAnswerGuess(my_player_answer.id, None)
someone_just_jerked_around = True
else:
dbManager.updatePlayerScore(guessed_player_answer.player_id, points_for_fakeout)
#message guesser saying "WRONG"
messageSender.sendMessage(from_number, "\r WRONG! You guessed someone else's fake answer!")
guessed_player_answer_mdn = dbManager.getPlayerMdnById(guessed_player_answer.player_id)
guessed_player_alias = dbManager.getPlayerById(guessed_player_answer.player_id)
#message faker saying someone guessed your shit! +x Points
#messageSender.sendMessage(guessed_player_answer_mdn, "HAHAHAHA. {} guessed your answer! +{} for fakeout!".format(from_number,points_for_fakeout))
messageSender.sendMessage(guessed_player_answer_mdn, "HAHAHAHA. {} guessed your answer! +{} for fakeout!".format(guessed_player_alias.player_name,points_for_fakeout))
if someone_just_jerked_around:
response_string = "You selected an invalid answer. Sry Bro"
else:
#now lets check whether i was the last to answer, then send scoreboard, and shift Gamestate
num_guesses = dbManager.getTotalGuesses(my_game.id,my_game.current_question_sequence_number)
total_players = dbManager.getPlayerCount(my_game_token)
if num_guesses == total_players:
#its time to change game state and send out results of the round
gameManager.sendResults(my_game.id)
game_continuing = gameManager.nextRound(my_game.id)
if not game_continuing:
response_string = "GAME OVER"
else:
response_string = ""
else:
#do nothing really - we're still waiting on other people
response_string = "Waiting for others to guess..."
else:
response_string = ""
return("<h1>Welcome to Twibbage</h1><br/><p>To play, text newgame q p to the number, whwere q is the number of quesitons, and p is the number of players you want in a game.</p>")
#finally, respond.
resp.message(response_string)
return str(resp)
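# Illustrative summary (not part of the original script) of the SMS commands the
# handler above understands:
#   "newgame <players> <questions> <alias>"  - host a game (all three are optional)
#   "<GAMEID> <alias>"                       - join an open game by its 5-character id
#   "rules" or "info"                        - have the rules texted back
#   "exitgame"                               - leave (and, if you are host, end) the game
# During play, any other text is treated as a fake answer or a numeric guess,
# depending on the current game state.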
if __name__ == "__main__":
app.run(debug=True)
| 59.475836 | 198 | 0.586349 |
f71e33bd084310d0aeba71a25aac58cd5fcf8bbb | 1,082 | py | Python | line.py | kavach-feature/Advanced_lane_finding | 12e4e330e338734fdb35655c7581b98ba1eb490b | [
"MIT"
] | null | null | null | line.py | kavach-feature/Advanced_lane_finding | 12e4e330e338734fdb35655c7581b98ba1eb490b | [
"MIT"
] | null | null | null | line.py | kavach-feature/Advanced_lane_finding | 12e4e330e338734fdb35655c7581b98ba1eb490b | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
class Line():
def __init__(self,n):
self.n=n
self.detected =False
#Polynomial coefficients of the lines
self.A=[]
self.B=[]
self.C=[]
#Running average of coefficients
self.A_avg=0.
self.B_avg=0.
self.C_avg=0.
def obtain_fit(self):
return (self.A_avg,self.B_avg,self.C_avg)
def update_fit(self,fit_coeffs):
"""Obtain the fit coefficients from the latest frame and apply over each of 2nd polynomial coefficients
for the purpose of smoothing
"""
full_Q= len(self.A) >= self.n
#Append line fit coefficients
self.A.append(fit_coeffs[0])
self.B.append(fit_coeffs[1])
self.C.append(fit_coeffs[2])
if full_Q:
_=self.A.pop(0)
_=self.B.pop(0)
_=self.C.pop(0)
# Compute the average of the polynomial coefficients
self.A_avg = np.mean(self.A)
self.B_avg = np.mean(self.B)
self.C_avg = np.mean(self.C)
return (self.A_avg,self.B_avg,self.C_avg)
| 18.655172 | 106 | 0.660813 |
f71e3bf6b5a4e1db0bd1e025dcf8b861aeb828af | 922 | py | Python | test/test_steps.py | jladdjr/ansible-builder | 7520396f8921b98a033a8f25248dbadb9cd83901 | [
"Apache-2.0"
] | null | null | null | test/test_steps.py | jladdjr/ansible-builder | 7520396f8921b98a033a8f25248dbadb9cd83901 | [
"Apache-2.0"
] | null | null | null | test/test_steps.py | jladdjr/ansible-builder | 7520396f8921b98a033a8f25248dbadb9cd83901 | [
"Apache-2.0"
] | null | null | null | import pytest
import textwrap
from ansible_builder.steps import AdditionalBuildSteps, PipSteps, BindepSteps
def test_steps_for_collection_dependencies():
assert list(PipSteps('requirements.txt')) == [
'ADD requirements.txt /build/',
'RUN pip3 install --upgrade -r /build/requirements.txt'
]
@pytest.mark.parametrize('verb', ['prepend', 'append'])
def test_additional_build_steps(verb):
additional_build_steps = {
'prepend': ["RUN echo This is the prepend test", "RUN whoami"],
'append': textwrap.dedent("""
RUN echo This is the append test
RUN whoami
""")
}
steps = AdditionalBuildSteps(additional_build_steps[verb])
assert len(list(steps)) == 2
def test_system_steps():
assert list(BindepSteps('bindep_output.txt')) == [
'ADD bindep_output.txt /build/',
'RUN dnf -y install $(cat /build/bindep_output.txt)'
]
| 27.939394 | 77 | 0.667028 |
f71e69d9aaba3771528b04fae2b24551b321c43d | 25,996 | py | Python | homeassistant/components/simplisafe/__init__.py | stravinci/AIS-home-assistant | ead4dafd3f801ebeb32860bd34443ed24a4f4167 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/simplisafe/__init__.py | stravinci/AIS-home-assistant | ead4dafd3f801ebeb32860bd34443ed24a4f4167 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/simplisafe/__init__.py | stravinci/AIS-home-assistant | ead4dafd3f801ebeb32860bd34443ed24a4f4167 | [
"Apache-2.0"
] | null | null | null | """Support for SimpliSafe alarm systems."""
import asyncio
from uuid import UUID
from simplipy import API
from simplipy.entity import EntityTypes
from simplipy.errors import EndpointUnavailable, InvalidCredentialsError, SimplipyError
from simplipy.websocket import (
EVENT_CAMERA_MOTION_DETECTED,
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
EVENT_DOORBELL_DETECTED,
EVENT_ENTRY_DETECTED,
EVENT_LOCK_LOCKED,
EVENT_LOCK_UNLOCKED,
EVENT_MOTION_DETECTED,
)
import voluptuous as vol
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import (
ATTR_CODE,
CONF_CODE,
CONF_TOKEN,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ATTR_ALARM_DURATION,
ATTR_ALARM_VOLUME,
ATTR_CHIME_VOLUME,
ATTR_ENTRY_DELAY_AWAY,
ATTR_ENTRY_DELAY_HOME,
ATTR_EXIT_DELAY_AWAY,
ATTR_EXIT_DELAY_HOME,
ATTR_LIGHT,
ATTR_VOICE_PROMPT_VOLUME,
DATA_CLIENT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
LOGGER,
VOLUMES,
)
DATA_LISTENER = "listener"
TOPIC_UPDATE_WEBSOCKET = "simplisafe_update_websocket_{0}"
EVENT_SIMPLISAFE_EVENT = "SIMPLISAFE_EVENT"
EVENT_SIMPLISAFE_NOTIFICATION = "SIMPLISAFE_NOTIFICATION"
DEFAULT_SOCKET_MIN_RETRY = 15
SUPPORTED_PLATFORMS = (
"alarm_control_panel",
"binary_sensor",
"lock",
"sensor",
)
WEBSOCKET_EVENTS_REQUIRING_SERIAL = [EVENT_LOCK_LOCKED, EVENT_LOCK_UNLOCKED]
WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT = [
EVENT_CAMERA_MOTION_DETECTED,
EVENT_DOORBELL_DETECTED,
EVENT_ENTRY_DETECTED,
EVENT_MOTION_DETECTED,
]
ATTR_CATEGORY = "category"
ATTR_LAST_EVENT_CHANGED_BY = "last_event_changed_by"
ATTR_LAST_EVENT_INFO = "last_event_info"
ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name"
ATTR_LAST_EVENT_SENSOR_SERIAL = "last_event_sensor_serial"
ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type"
ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_MESSAGE = "message"
ATTR_PIN_LABEL = "label"
ATTR_PIN_LABEL_OR_VALUE = "label_or_pin"
ATTR_PIN_VALUE = "pin"
ATTR_SYSTEM_ID = "system_id"
ATTR_TIMESTAMP = "timestamp"
SERVICE_BASE_SCHEMA = vol.Schema({vol.Required(ATTR_SYSTEM_ID): cv.positive_int})
SERVICE_REMOVE_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL_OR_VALUE): cv.string}
)
SERVICE_SET_PIN_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{vol.Required(ATTR_PIN_LABEL): cv.string, vol.Required(ATTR_PIN_VALUE): cv.string}
)
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA = SERVICE_BASE_SCHEMA.extend(
{
vol.Optional(ATTR_ALARM_DURATION): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=480)
),
vol.Optional(ATTR_ALARM_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_CHIME_VOLUME): vol.All(vol.Coerce(int), vol.In(VOLUMES)),
vol.Optional(ATTR_ENTRY_DELAY_AWAY): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=30, max=255)
),
vol.Optional(ATTR_ENTRY_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(max=255)
),
vol.Optional(ATTR_EXIT_DELAY_AWAY): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(min=45, max=255)
),
vol.Optional(ATTR_EXIT_DELAY_HOME): vol.All(
cv.time_period, lambda value: value.seconds, vol.Range(max=255)
),
vol.Optional(ATTR_LIGHT): cv.boolean,
vol.Optional(ATTR_VOICE_PROMPT_VOLUME): vol.All(
vol.Coerce(int), vol.In(VOLUMES)
),
}
)
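# Illustrative note (not part of the integration): data validated by
# SERVICE_SET_PIN_SCHEMA looks like {"system_id": 12345, "label": "guests",
# "pin": "1234"} (values made up), while SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA
# additionally accepts the delay, volume, and light options declared above.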
CONFIG_SCHEMA = cv.deprecated(DOMAIN, invalidation_version="0.119")
@callback
def _async_save_refresh_token(hass, config_entry, token):
"""Save a refresh token to the config entry."""
hass.config_entries.async_update_entry(
config_entry, data={**config_entry.data, CONF_TOKEN: token}
)
async def async_get_client_id(hass):
"""Get a client ID (based on the HASS unique ID) for the SimpliSafe API.
Note that SimpliSafe requires full, "dashed" versions of UUIDs.
"""
hass_id = await hass.helpers.instance_id.async_get()
return str(UUID(hass_id))
async def async_register_base_station(hass, system, config_entry_id):
"""Register a new bridge."""
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=config_entry_id,
identifiers={(DOMAIN, system.serial)},
manufacturer="SimpliSafe",
model=system.version,
name=system.address,
)
async def async_setup(hass, config):
"""Set up the SimpliSafe component."""
hass.data[DOMAIN] = {DATA_CLIENT: {}, DATA_LISTENER: {}}
return True
async def async_setup_entry(hass, config_entry):
"""Set up SimpliSafe as config entry."""
entry_updates = {}
if not config_entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
entry_updates["unique_id"] = config_entry.data[CONF_USERNAME]
if CONF_CODE in config_entry.data:
# If an alarm code was provided as part of configuration.yaml, pop it out of
# the config entry's data and move it to options:
data = {**config_entry.data}
entry_updates["data"] = data
entry_updates["options"] = {
**config_entry.options,
CONF_CODE: data.pop(CONF_CODE),
}
if entry_updates:
hass.config_entries.async_update_entry(config_entry, **entry_updates)
_verify_domain_control = verify_domain_control(hass, DOMAIN)
client_id = await async_get_client_id(hass)
websession = aiohttp_client.async_get_clientsession(hass)
try:
api = await API.login_via_token(
config_entry.data[CONF_TOKEN], client_id=client_id, session=websession
)
except InvalidCredentialsError:
LOGGER.error("Invalid credentials provided")
return False
except SimplipyError as err:
LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
_async_save_refresh_token(hass, config_entry, api.refresh_token)
simplisafe = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = SimpliSafe(
hass, api, config_entry
)
await simplisafe.async_init()
for platform in SUPPORTED_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
@callback
def verify_system_exists(coro):
"""Log an error if a service call uses an invalid system ID."""
async def decorator(call):
"""Decorate."""
system_id = int(call.data[ATTR_SYSTEM_ID])
if system_id not in simplisafe.systems:
LOGGER.error("Unknown system ID in service call: %s", system_id)
return
await coro(call)
return decorator
@callback
def v3_only(coro):
"""Log an error if the decorated coroutine is called with a v2 system."""
async def decorator(call):
"""Decorate."""
system = simplisafe.systems[int(call.data[ATTR_SYSTEM_ID])]
if system.version != 3:
LOGGER.error("Service only available on V3 systems")
return
await coro(call)
return decorator
@verify_system_exists
@_verify_domain_control
async def clear_notifications(call):
"""Clear all active notifications."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.clear_notifications()
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@_verify_domain_control
async def remove_pin(call):
"""Remove a PIN."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.remove_pin(call.data[ATTR_PIN_LABEL_OR_VALUE])
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@_verify_domain_control
async def set_pin(call):
"""Set a PIN."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_pin(call.data[ATTR_PIN_LABEL], call.data[ATTR_PIN_VALUE])
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
@verify_system_exists
@v3_only
@_verify_domain_control
async def set_system_properties(call):
"""Set one or more system parameters."""
system = simplisafe.systems[call.data[ATTR_SYSTEM_ID]]
try:
await system.set_properties(
{
prop: value
for prop, value in call.data.items()
if prop != ATTR_SYSTEM_ID
}
)
except SimplipyError as err:
LOGGER.error("Error during service call: %s", err)
return
for service, method, schema in [
("clear_notifications", clear_notifications, None),
("remove_pin", remove_pin, SERVICE_REMOVE_PIN_SCHEMA),
("set_pin", set_pin, SERVICE_SET_PIN_SCHEMA),
(
"set_system_properties",
set_system_properties,
SERVICE_SET_SYSTEM_PROPERTIES_SCHEMA,
),
]:
async_register_admin_service(hass, DOMAIN, service, method, schema=schema)
config_entry.add_update_listener(async_reload_entry)
return True
async def async_unload_entry(hass, entry):
"""Unload a SimpliSafe config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in SUPPORTED_PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_CLIENT].pop(entry.entry_id)
remove_listener = hass.data[DOMAIN][DATA_LISTENER].pop(entry.entry_id)
remove_listener()
return unload_ok
async def async_reload_entry(hass, config_entry):
"""Handle an options update."""
await hass.config_entries.async_reload(config_entry.entry_id)
class SimpliSafeWebsocket:
"""Define a SimpliSafe websocket "manager" object."""
def __init__(self, hass, websocket):
"""Initialize."""
self._hass = hass
self._websocket = websocket
@staticmethod
def _on_connect():
"""Define a handler to fire when the websocket is connected."""
LOGGER.info("Connected to websocket")
@staticmethod
def _on_disconnect():
"""Define a handler to fire when the websocket is disconnected."""
LOGGER.info("Disconnected from websocket")
def _on_event(self, event):
"""Define a handler to fire when a new SimpliSafe event arrives."""
LOGGER.debug("New websocket event: %s", event)
async_dispatcher_send(
self._hass, TOPIC_UPDATE_WEBSOCKET.format(event.system_id), event
)
if event.event_type not in WEBSOCKET_EVENTS_TO_TRIGGER_HASS_EVENT:
return
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_EVENT,
event_data={
ATTR_LAST_EVENT_CHANGED_BY: event.changed_by,
ATTR_LAST_EVENT_TYPE: event.event_type,
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_SERIAL: event.sensor_serial,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_SYSTEM_ID: event.system_id,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
},
)
async def async_connect(self):
"""Register handlers and connect to the websocket."""
self._websocket.on_connect(self._on_connect)
self._websocket.on_disconnect(self._on_disconnect)
self._websocket.on_event(self._on_event)
await self._websocket.async_connect()
async def async_disconnect(self):
"""Disconnect from the websocket."""
await self._websocket.async_disconnect()
class SimpliSafe:
"""Define a SimpliSafe data object."""
def __init__(self, hass, api, config_entry):
"""Initialize."""
self._api = api
self._emergency_refresh_token_used = False
self._hass = hass
self._system_notifications = {}
self.config_entry = config_entry
self.coordinator = None
self.initial_event_to_use = {}
self.systems = {}
self.websocket = SimpliSafeWebsocket(hass, api.websocket)
@callback
def _async_process_new_notifications(self, system):
"""Act on any new system notifications."""
if self._hass.state != CoreState.running:
# If HASS isn't fully running yet, it may cause the SIMPLISAFE_NOTIFICATION
# event to fire before dependent components (like automation) are fully
# ready. If that's the case, skip:
return
latest_notifications = set(system.notifications)
to_add = latest_notifications.difference(
self._system_notifications[system.system_id]
)
if not to_add:
return
LOGGER.debug("New system notifications: %s", to_add)
self._system_notifications[system.system_id].update(to_add)
for notification in to_add:
text = notification.text
if notification.link:
text = f"{text} For more information: {notification.link}"
self._hass.bus.async_fire(
EVENT_SIMPLISAFE_NOTIFICATION,
event_data={
ATTR_CATEGORY: notification.category,
ATTR_CODE: notification.code,
ATTR_MESSAGE: text,
ATTR_TIMESTAMP: notification.timestamp,
},
)
async def async_init(self):
"""Initialize the data class."""
asyncio.create_task(self.websocket.async_connect())
async def async_websocket_disconnect(_):
"""Define an event handler to disconnect from the websocket."""
await self.websocket.async_disconnect()
self._hass.data[DOMAIN][DATA_LISTENER][
self.config_entry.entry_id
] = self._hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, async_websocket_disconnect
)
self.systems = await self._api.get_systems()
for system in self.systems.values():
self._system_notifications[system.system_id] = set()
self._hass.async_create_task(
async_register_base_station(
self._hass, system, self.config_entry.entry_id
)
)
# Future events will come from the websocket, but since subscription to the
# websocket doesn't provide the most recent event, we grab it from the REST
# API to ensure event-related attributes aren't empty on startup:
try:
self.initial_event_to_use[
system.system_id
] = await system.get_latest_event()
except SimplipyError as err:
LOGGER.error("Error while fetching initial event: %s", err)
self.initial_event_to_use[system.system_id] = {}
self.coordinator = DataUpdateCoordinator(
self._hass,
LOGGER,
name=self.config_entry.data[CONF_USERNAME],
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=self.async_update,
)
async def async_update(self):
"""Get updated data from SimpliSafe."""
async def async_update_system(system):
"""Update a system."""
await system.update(cached=system.version != 3)
self._async_process_new_notifications(system)
tasks = [async_update_system(system) for system in self.systems.values()]
results = await asyncio.gather(*tasks, return_exceptions=True)
for result in results:
if isinstance(result, InvalidCredentialsError):
if self._emergency_refresh_token_used:
matching_flows = [
flow
for flow in self._hass.config_entries.flow.async_progress()
if flow["context"].get("source") == SOURCE_REAUTH
and flow["context"].get("unique_id")
== self.config_entry.unique_id
]
if not matching_flows:
self._hass.async_create_task(
self._hass.config_entries.flow.async_init(
DOMAIN,
context={
"source": SOURCE_REAUTH,
"unique_id": self.config_entry.unique_id,
},
data=self.config_entry.data,
)
)
LOGGER.error("Update failed with stored refresh token")
raise UpdateFailed from result
LOGGER.warning("SimpliSafe cloud error; trying stored refresh token")
self._emergency_refresh_token_used = True
try:
await self._api.refresh_access_token(
self.config_entry.data[CONF_TOKEN]
)
return
except SimplipyError as err:
LOGGER.error("Error while using stored refresh token: %s", err)
raise UpdateFailed from err
if isinstance(result, EndpointUnavailable):
                # In case the user attempts an action not allowed in their current plan,
# we merely log that message at INFO level (so the user is aware,
# but not spammed with ERROR messages that they cannot change):
LOGGER.info(result)
raise UpdateFailed from result
if isinstance(result, SimplipyError):
LOGGER.error("SimpliSafe error while updating: %s", result)
raise UpdateFailed from result
if isinstance(result, Exception):
LOGGER.error("Unknown error while updating: %s", result)
raise UpdateFailed from result
if self._api.refresh_token != self.config_entry.data[CONF_TOKEN]:
_async_save_refresh_token(
self._hass, self.config_entry, self._api.refresh_token
)
# If we've reached this point using an emergency refresh token, we're in the
# clear and we can discard it:
if self._emergency_refresh_token_used:
self._emergency_refresh_token_used = False
class SimpliSafeEntity(CoordinatorEntity):
"""Define a base SimpliSafe entity."""
def __init__(self, simplisafe, system, name, *, serial=None):
"""Initialize."""
super().__init__(simplisafe.coordinator)
self._name = name
self._online = True
self._simplisafe = simplisafe
self._system = system
self.websocket_events_to_listen_for = [
EVENT_CONNECTION_LOST,
EVENT_CONNECTION_RESTORED,
]
if serial:
self._serial = serial
else:
self._serial = system.serial
try:
sensor_type = EntityTypes(
simplisafe.initial_event_to_use[system.system_id].get("sensorType")
)
except ValueError:
sensor_type = EntityTypes.unknown
self._attrs = {
ATTR_LAST_EVENT_INFO: simplisafe.initial_event_to_use[system.system_id].get(
"info"
),
ATTR_LAST_EVENT_SENSOR_NAME: simplisafe.initial_event_to_use[
system.system_id
].get("sensorName"),
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type.name,
ATTR_LAST_EVENT_TIMESTAMP: simplisafe.initial_event_to_use[
system.system_id
].get("eventTimestamp"),
ATTR_SYSTEM_ID: system.system_id,
}
self._device_info = {
"identifiers": {(DOMAIN, system.system_id)},
"manufacturer": "SimpliSafe",
"model": system.version,
"name": name,
"via_device": (DOMAIN, system.serial),
}
@property
def available(self):
"""Return whether the entity is available."""
# We can easily detect if the V3 system is offline, but no simple check exists
# for the V2 system. Therefore, assuming the coordinator hasn't failed, we mark
# the entity as available if:
# 1. We can verify that the system is online (assuming True if we can't)
# 2. We can verify that the entity is online
return not (self._system.version == 3 and self._system.offline) and self._online
@property
def device_info(self):
"""Return device registry information for this entity."""
return self._device_info
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attrs
@property
def name(self):
"""Return the name of the entity."""
return f"{self._system.address} {self._name}"
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return self._serial
@callback
def _async_internal_update_from_websocket_event(self, event):
"""Perform internal websocket handling prior to handing off."""
if event.event_type == EVENT_CONNECTION_LOST:
self._online = False
elif event.event_type == EVENT_CONNECTION_RESTORED:
self._online = True
# It's uncertain whether SimpliSafe events will still propagate down the
# websocket when the base station is offline. Just in case, we guard against
# further action until connection is restored:
if not self._online:
return
if event.sensor_type:
sensor_type = event.sensor_type.name
else:
sensor_type = None
self._attrs.update(
{
ATTR_LAST_EVENT_INFO: event.info,
ATTR_LAST_EVENT_SENSOR_NAME: event.sensor_name,
ATTR_LAST_EVENT_SENSOR_TYPE: sensor_type,
ATTR_LAST_EVENT_TIMESTAMP: event.timestamp,
}
)
self.async_update_from_websocket_event(event)
@callback
def _handle_coordinator_update(self):
"""Update the entity with new REST API data."""
self.async_update_from_rest_api()
self.async_write_ha_state()
@callback
def _handle_websocket_update(self, event):
"""Update the entity with new websocket data."""
# Ignore this event if it belongs to a system other than this one:
if event.system_id != self._system.system_id:
return
# Ignore this event if this entity hasn't expressed interest in its type:
if event.event_type not in self.websocket_events_to_listen_for:
return
        # Ignore this event if it belongs to an entity with a different serial
# number from this one's:
if (
event.event_type in WEBSOCKET_EVENTS_REQUIRING_SERIAL
and event.sensor_serial != self._serial
):
return
self._async_internal_update_from_websocket_event(event)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
TOPIC_UPDATE_WEBSOCKET.format(self._system.system_id),
self._handle_websocket_update,
)
)
self.async_update_from_rest_api()
@callback
def async_update_from_rest_api(self):
"""Update the entity with the provided REST API data."""
raise NotImplementedError()
@callback
def async_update_from_websocket_event(self, event):
"""Update the entity with the provided websocket event."""
class SimpliSafeBaseSensor(SimpliSafeEntity):
"""Define a SimpliSafe base (binary) sensor."""
def __init__(self, simplisafe, system, sensor):
"""Initialize."""
super().__init__(simplisafe, system, sensor.name, serial=sensor.serial)
self._device_info["identifiers"] = {(DOMAIN, sensor.serial)}
self._device_info["model"] = sensor.type.name
self._device_info["name"] = sensor.name
self._sensor = sensor
self._sensor_type_human_name = " ".join(
[w.title() for w in self._sensor.type.name.split("_")]
)
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._system.address} {self._name} {self._sensor_type_human_name}"
| 34.52324 | 88 | 0.637021 |
f71e69e86ee5de49d09df8256001723a4356642b | 4,280 | py | Python | parsing/tracking_logs/generate_course_tracking_logs.py | andyzsf/edx_data_research | 07a587edb1dc4797f116bfeb60ffecbf4ce5bd7a | [
"MIT"
] | null | null | null | parsing/tracking_logs/generate_course_tracking_logs.py | andyzsf/edx_data_research | 07a587edb1dc4797f116bfeb60ffecbf4ce5bd7a | [
"MIT"
] | null | null | null | parsing/tracking_logs/generate_course_tracking_logs.py | andyzsf/edx_data_research | 07a587edb1dc4797f116bfeb60ffecbf4ce5bd7a | [
"MIT"
] | null | null | null | '''
This module extracts tracking logs for a given course over the date range
between when course enrollment started and when the course ended. For each log,
the parent_data and metadata from the course_structure collection are
appended to the log based on the event (or page) key in the log.
'''
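# Illustrative invocation (a sketch only; the database and file names are
# placeholders, not part of the original project):
#
#   python generate_course_tracking_logs.py raw_logs_db course_db config.json
#
# The first argument is the source database holding the raw 'tracking'
# collection, the second is the course database to write into, and the third
# is the JSON config file consumed by load_config() below.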
import pymongo
import sys
from datetime import datetime
import json
def connect_to_db_collection(db_name, collection_name):
'''
Return collection of a given database name and collection name
'''
    connection = pymongo.MongoClient('localhost', 27017)  # MongoClient supersedes the removed pymongo.Connection
db = connection[db_name]
collection = db[collection_name]
return collection
def load_config(config_file):
'''
    Return the course ids and the range of dates for which course-specific
    tracking logs will be extracted
'''
with open(config_file) as file_handler:
data = json.load(file_handler)
if not isinstance(data['course_ids'], list):
raise ValueError('Expecting list of course ids')
try:
start_date = datetime.strptime(data['date_of_course_enrollment'], '%Y-%m-%d')
end_date = datetime.strptime(data['date_of_course_completion'], '%Y-%m-%d')
except ValueError:
raise ValueError('Incorrect data format, should be YYYY-MM-DD')
return data['course_ids'], start_date.date(), end_date.date()
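# A config file accepted by load_config() might look like the sketch below
# (the course id and dates are placeholders):
#
#   {
#       "course_ids": ["org/course/run"],
#       "date_of_course_enrollment": "2014-01-01",
#       "date_of_course_completion": "2014-06-30"
#   }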
def append_course_structure_data(course_structure_collection, _id, document):
'''
    Append parent_data and metadata (if they exist) from the course structure
    document to the tracking log
'''
    try:
        data = course_structure_collection.find({"_id" : _id})[0]
        if 'parent_data' in data:
            document['parent_data'] = data['parent_data']
        if 'metadata' in data:
            document['metadata'] = data['metadata']
    except IndexError:
        # No course_structure document matches this id; leave the log unchanged
        pass
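# For reference, the course_structure documents read above are only assumed to
# carry the keys used in this module; a matching document might look roughly like:
#
#   {"_id": "<module id>", "parent_data": { ... }, "metadata": { ... }}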
def extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date):
'''
    Copy into the destination collection all tracking logs that match the given
    course ids and whose dates fall within the given range
'''
documents = source_collection.find({'course_id' : { '$in' : course_ids }})
for document in documents:
if start_date <= datetime.strptime(document['time'].split('T')[0], "%Y-%m-%d").date() <= end_date:
# Bind parent_data and metadata from course_structure to tracking document
bound = False
if document['event']:
if isinstance(document['event'], dict):
if 'id' in document['event']:
splitted = document['event']['id'].split('-')
if len(splitted) > 3:
document['event']['id'] = splitted[-1]
if not bound:
append_course_structure_data(course_structure_collection, document['event']['id'], document)
bound = True
if document['page']:
splitted = document['page'].split('/')
if len(splitted) > 2:
document['page'] = splitted[-2]
if not bound:
append_course_structure_data(course_structure_collection, document['page'], document)
# End of binding, now insert document into collection
            destination_collection.insert_one(document)
def main():
    if len(sys.argv) != 4:  # program name plus the three arguments described below
usage_message = """usage: %s source_db destination_db course_config_file
                           Provide the source database with raw tracking logs, the course
                           database to insert extracted logs into, and the config file\n
"""
sys.stderr.write(usage_message % sys.argv[0])
sys.exit(1)
source_db = sys.argv[1]
destination_db = sys.argv[2]
source_collection = connect_to_db_collection(source_db, 'tracking')
destination_collection = connect_to_db_collection(destination_db, 'tracking')
course_structure_collection = connect_to_db_collection(destination_db, 'course_structure')
course_ids, start_date, end_date = load_config(sys.argv[3])
extract_tracking_logs(source_collection, destination_collection, course_structure_collection, course_ids, start_date, end_date)
if __name__ == '__main__':
main()
| 40 | 132 | 0.642991 |