from __future__ import (absolute_import, division, print_function,
unicode_literals)
from pyramid.renderers import render
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
from mako.exceptions import TopLevelLookupException
from premailer import Premailer
def process_html(body):
return Premailer(body,
keep_style_tags=True,
include_star_selectors=True).transform()
def send(request, template_name, vars, to=None, from_=None,
bcc=None, cc=None):
settings = request.registry.settings
subject = render('emails/%s.subject.txt' % template_name,
vars, request)
subject = subject.strip()
msg = Message(
subject=subject,
sender=from_ or settings['mailer.from'],
recipients=to or [settings['mailer.from']],
)
try:
html_body = render('emails/%s.html' % template_name,
vars, request)
except TopLevelLookupException:
pass
else:
msg.html = process_html(html_body)
msg.body = render('emails/%s.txt' % template_name,
vars, request)
mailer = get_mailer(request)
mailer.send(msg)
def send_with_admin(request, template_name, vars, to=None, from_=None,
bcc=None, cc=None, reply_to=None):
raise NotImplementedError
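# Hedged usage sketch, not part of the original module: shows how send() above
# might be called from a Pyramid view. The template name 'welcome', the
# recipient address, and the template variables are illustrative assumptions;
# emails/welcome.subject.txt and emails/welcome.txt (plus an optional
# emails/welcome.html) would have to exist for the render() calls to succeed.
def example_send_welcome(request):
    send(request,
         'welcome',
         {'user_name': 'Alice'},          # vars passed to the templates
         to=['alice@example.com'])        # falls back to settings['mailer.from'] if omitted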
| {
"repo_name": "storborg/warpworks",
"path": "warpworks/mail.py",
"copies": "1",
"size": "1404",
"license": "mit",
"hash": 9175132279240191000,
"line_mean": 28.25,
"line_max": 70,
"alpha_frac": 0.6203703704,
"autogenerated": false,
"ratio": 3.9773371104815864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 48
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from pyramid.settings import asbool
from .client import ElasticClient
__version__ = '0.3.2.dev'
def client_from_config(settings, prefix='elastic.'):
"""
Instantiate and configure an Elasticsearch client from settings.
In typical Pyramid usage, you shouldn't use this directly: instead, just
include ``pyramid_es`` and use the :py:func:`get_client` function to get
access to the shared :py:class:`.client.ElasticClient` instance.
"""
return ElasticClient(
servers=settings.get(prefix + 'servers', ['localhost:9200']),
timeout=settings.get(prefix + 'timeout', 1.0),
index=settings[prefix + 'index'],
use_transaction=asbool(settings.get(prefix + 'use_transaction', True)),
disable_indexing=settings.get(prefix + 'disable_indexing', False))
def includeme(config):
registry = config.registry
settings = registry.settings
client = client_from_config(settings)
if asbool(settings.get('elastic.ensure_index_on_start')):
client.ensure_index()
registry.pyramid_es_client = client
def get_client(request):
"""
Get the registered Elasticsearch client. The supplied argument can be
either a ``Request`` instance or a ``Registry``.
"""
registry = getattr(request, 'registry', None)
if registry is None:
registry = request
return registry.pyramid_es_client
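# Hedged usage sketch, not part of the original module: illustrates the two
# entry points described in the docstrings above. The settings dict and the
# index name 'myapp' are illustrative assumptions.
def example_standalone_client():
    settings = {'elastic.index': 'myapp',
                'elastic.servers': ['localhost:9200']}
    return client_from_config(settings)

def example_view(request):
    # inside a Pyramid app that has done config.include('pyramid_es')
    return get_client(request)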
| {
"repo_name": "storborg/pyramid_es",
"path": "pyramid_es/__init__.py",
"copies": "1",
"size": "1488",
"license": "mit",
"hash": 105021976366827100,
"line_mean": 31.347826087,
"line_max": 79,
"alpha_frac": 0.6767473118,
"autogenerated": false,
"ratio": 4.076712328767123,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5253459640567124,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
from matplotlib.pyplot import FuncFormatter
dollar = lambda x, pos: '$%1.2f' % x
currency = dollar
comma = lambda x, pos: '{:0,d}'.format(int(x))
millions = lambda x, pos: '$%1.1fM' % (x*1e-6)
percent = lambda x, pos: '{0:.0f}%'.format(x*100)
LABEL_FORMATS = {
'comma': comma,
'dollar': dollar,
'currency': currency,
'millions': millions,
'percent': percent
}
class scale_y_continuous(scale):
VALID_SCALES = ['name', 'labels', 'limits', 'breaks', 'trans']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.ylab = self.name.title()
if not (self.labels is None):
if self.labels in LABEL_FORMATS:
format_func = LABEL_FORMATS[self.labels]
gg.ytick_formatter = FuncFormatter(format_func)
else:
gg.ytick_labels = self.labels
if not (self.limits is None):
gg.ylimits = self.limits
if not (self.breaks is None):
gg.ybreaks = self.breaks
return gg
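# Hedged usage sketch, not part of the original module: picks the 'dollar'
# formatter from LABEL_FORMATS above. ggplot, aes, geom_line and the sample
# 'meat' dataset are assumed to be importable from this package, as in the
# docstring examples of the other scales.
def example_dollar_axis():
    from ggplot import ggplot, aes, geom_line, meat
    p = ggplot(meat, aes('date', 'beef')) + geom_line()
    # name relabels the axis; labels='dollar' selects the formatter above
    return p + scale_y_continuous(name='beef price', labels='dollar')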
| {
"repo_name": "bitemyapp/ggplot",
"path": "ggplot/scales/scale_y_continuous.py",
"copies": "12",
"size": "1202",
"license": "bsd-2-clause",
"hash": -2603605603936013000,
"line_mean": 30.6315789474,
"line_max": 66,
"alpha_frac": 0.578202995,
"autogenerated": false,
"ratio": 3.357541899441341,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.032047148666277836,
"num_lines": 38
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
import brewer2mpl
def _number_to_palette(ctype, n):
n -= 1
palettes = sorted(brewer2mpl.COLOR_MAPS[ctype].keys())
if n < len(palettes):
return palettes[n]
def _handle_shorthand(text):
abbrevs = {
"seq": "Sequential",
"qual": "Qualitative",
"div": "Diverging"
}
text = abbrevs.get(text, text)
text = text.title()
return text
class scale_color_brewer(scale):
"""
Use ColorBrewer (http://colorbrewer2.org/) style colors
Parameters
----------
type: string
One of seq (sequential), div (diverging) or qual (qualitative)
palette: string or int
If a string, will use that named palette. If a number, will index into
the list of palettes of the appropriate type.
Examples
--------
>>> from ggplot import *
>>> p = ggplot(aes(x='carat', y='price', colour='clarity'), data=diamonds)
>>> p += geom_point()
>>> print(p + scale_color_brewer(palette=4))
>>> print(p + scale_color_brewer(type='diverging'))
>>> print(p + scale_color_brewer(type='div'))
>>> print(p + scale_color_brewer(type='seq'))
>>> print(p + scale_color_brewer(type='seq', palette='Blues'))
"""
VALID_SCALES = ['type', 'palette']
def __radd__(self, gg):
# gg = deepcopy(gg)
if self.type:
ctype = self.type
else:
ctype = "Sequential"
ctype = _handle_shorthand(ctype)
if self.palette:
palette = self.palette
else:
palette = _number_to_palette(ctype, 1)
if isinstance(palette, int):
palette = _number_to_palette(ctype, palette)
# color brewer requires a minimum of 3 colors in a palette
try:
color_col = gg._aes.data.get('color', gg._aes.data.get('fill'))
n_colors = max(gg.data[color_col].nunique(), 3)
except:
# If we are neither using 'color' nor 'fill' then assume there is
# only one color used
n_colors = 3
bmap = brewer2mpl.get_map(palette, ctype, n_colors)
gg.manual_color_list = bmap.hex_colors
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_color_brewer.py",
"copies": "1",
"size": "2321",
"license": "bsd-2-clause",
"hash": 4730006391397340000,
"line_mean": 28.3797468354,
"line_max": 78,
"alpha_frac": 0.5721671693,
"autogenerated": false,
"ratio": 3.5435114503816796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9594964329243469,
"avg_score": 0.004142858087642025,
"num_lines": 79
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, rgb2hex, ColorConverter
def colors_at_breaks(cmap, breaks=[0, 0.25, 0.5, 0.75, 1.]):
return [rgb2hex(cmap(bb)[:3]) for bb in breaks]
class scale_color_gradient(scale):
"""
Specify a two- or three-point gradient.
Parameters
----------
name :
Name of an existing gradient scheme
limits :
List of the upper and lower bounds of the gradient
low :
Colour at the lower bound of the gradient
mid :
Colour at the middle of the gradient
high :
Colour at the upper bound of the gradient
Examples
--------
>>> from ggplot import *
>>> diamonds_premium = diamonds[diamonds.cut=='Premium']
>>> gg = ggplot(diamonds_premium, aes(x='depth', y='carat', colour='price')) + \\
... geom_point()
>>> print(gg + scale_colour_gradient(low='red', mid='white', high='blue', limits=[4000,6000]) + \\
... ggtitle('With red-blue gradient'))
>>> print(gg + ggtitle('With standard gradient'))
"""
VALID_SCALES = ['name', 'limits', 'low', 'mid', 'high']
def __radd__(self, gg):
# gg = deepcopy(gg)
# TODO: ???
# if self.name:
# gg.color_label = self.name
if not (self.limits is None):
gg.color_limits = self.limits
color_spectrum = []
if self.low:
color_spectrum.append(self.low)
if self.mid:
color_spectrum.append(self.mid)
if self.high:
color_spectrum.append(self.high)
if self.low and self.high:
gradient2n = LinearSegmentedColormap.from_list('gradient2n', color_spectrum)
plt.cm.register_cmap(cmap=gradient2n)
# add them back to ggplot
gg.color_scale = colors_at_breaks(gradient2n)
gg.colormap = gradient2n
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_color_gradient.py",
"copies": "1",
"size": "2076",
"license": "bsd-2-clause",
"hash": -3048938840685457000,
"line_mean": 31.4375,
"line_max": 102,
"alpha_frac": 0.5910404624,
"autogenerated": false,
"ratio": 3.6808510638297873,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47718915262297873,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
CRAYON_COLORS = {
"red": "#ed0a3f",
"maroon": "#c32148",
"scarlet": "#fd0e35",
"brick red": "#c62d42",
"english vermilion": "#cc474b",
"madder lake": "#cc3336",
"permanent geranium lake": "#e12c2c",
"maximum red": "#d92121",
"indian red": "#b94e48",
"orange-red": "#ff5349",
"sunset orange": "#fe4c40",
"bittersweet": "#fe6f5e",
"dark venetian red": "#b33b24",
"venetian red": "#cc553d",
"light venetian red": "#e6735c",
"vivid tangerine": "#ff9980",
"middle red": "#e58e73",
"burnt orange": "#ff7f49",
"red-orange": "#ff681f",
"orange": "#ff8833",
"macaroni and cheese": "#ffb97b",
"middle yellow red": "#ecb176",
"mango tango": "#e77200",
"yellow-orange": "#ffae42",
"maximum yellow red": "#f2ba49",
"banana mania": "#fbe7b2",
"maize": "#f2c649",
"orange-yellow": "#f8d568",
"goldenrod": "#fcd667",
"dandelion": "#fed85d",
"yellow": "#fbe870",
"green-yellow": "#f1e788",
"middle yellow": "#ffeb00",
"olive green": "#b5b35c",
"spring green": "#ecebbd",
"maximum yellow": "#fafa37",
"canary": "#ffff99",
"lemon yellow": "#ffff9f",
"maximum green yellow": "#d9e650",
"middle green yellow": "#acbf60",
"inchworm": "#afe313",
"light chrome green": "#bee64b",
"yellow-green": "#c5e17a",
"maximum green": "#5e8c31",
"asparagus": "#7ba05b",
"granny smith apple": "#9de093",
"fern": "#63b76c",
"middle green": "#4d8c57",
"green": "#3aa655",
"medium chrome green": "#6ca67c",
"forest green": "#5fa777",
"sea green": "#93dfb8",
"shamrock": "#33cc99",
"mountain meadow": "#1ab385",
"jungle green": "#29ab87",
"caribbean green": "#00cc99",
"tropical rain forest": "#00755e",
"middle blue green": "#8dd9cc",
"pine green": "#01786f",
"maximum blue green": "#30bfbf",
"robin's egg blue": "#00cccc",
"teal blue": "#008080",
"light blue": "#8fd8d8",
"aquamarine": "#95e0e8",
"turquoise blue": "#6cdae7",
"outer space": "#2d383a",
"sky blue": "#76d7ea",
"middle blue": "#7ed4e6",
"blue-green": "#0095b7",
"pacific blue": "#009dc4",
"cerulean": "#02a4d3",
"maximum blue": "#47abcc",
"blue1": "#4997d0",
"cerulean blue": "#339acc",
"cornflower": "#93ccea",
"green-blue": "#2887c8",
"midnight blue": "#00468c",
"navy blue": "#0066cc",
"denim": "#1560bd",
"blue3": "#0066ff",
"cadet blue": "#a9b2c3",
"periwinkle": "#c3cde6",
"blue2": "#4570e6",
"wild blue yonder": "#7a89b8",
"indigo": "#4f69c6",
"manatee": "#8d90a1",
"cobalt blue": "#8c90c8",
"celestial blue": "#7070cc",
"blue bell": "#9999cc",
"maximum blue purple": "#acace6",
"violet-blue": "#766ec8",
"blue-violet": "#6456b7",
"ultramarine blue": "#3f26bf",
"middle blue purple": "#8b72be",
"purple heart": "#652dc1",
"royal purple": "#6b3fa0",
"violet2": "#8359a3",
"medium violet": "#8f47b3",
"wisteria": "#c9a0dc",
"lavender1": "#bf8fcc",
"vivid violet": "#803790",
"maximum purple": "#733380",
"purple mountains' majesty": "#d6aedd",
"fuchsia": "#c154c1",
"pink flamingo": "#fc74fd",
"violet1": "#732e6c",
"brilliant rose": "#e667ce",
"orchid": "#e29cd2",
"plum": "#8e3179",
"medium rose": "#d96cbe",
"thistle": "#ebb0d7",
"mulberry": "#c8509b",
"red-violet": "#bb3385",
"middle purple": "#d982b5",
"maximum red purple": "#a63a79",
"jazzberry jam": "#a50b5e",
"eggplant": "#614051",
"magenta": "#f653a6",
"cerise": "#da3287",
"wild strawberry": "#ff3399",
"lavender2": "#fbaed2",
"cotton candy": "#ffb7d5",
"carnation pink": "#ffa6c9",
"violet-red": "#f7468a",
"razzmatazz": "#e30b5c",
"pig pink": "#fdd7e4",
"carmine": "#e62e6b",
"blush": "#db5079",
"tickle me pink": "#fc80a5",
"mauvelous": "#f091a9",
"salmon": "#ff91a4",
"middle red purple": "#a55353",
"mahogany": "#ca3435",
"melon": "#febaad",
"pink sherbert": "#f7a38e",
"burnt sienna": "#e97451",
"brown": "#af593e",
"sepia": "#9e5b40",
"fuzzy wuzzy": "#87421f",
"beaver": "#926f5b",
"tumbleweed": "#dea681",
"raw sienna": "#d27d46",
"van dyke brown": "#664228",
"tan": "#d99a6c",
"desert sand": "#edc9af",
"peach": "#ffcba4",
"burnt umber": "#805533",
"apricot": "#fdd5b1",
"almond": "#eed9c4",
"raw umber": "#665233",
"shadow": "#837050",
"raw sienna1": "#e6bc5c",
"timberwolf": "#d9d6cf",
"gold1": "#92926e",
"gold2": "#e6be8a",
"silver": "#c9c0bb",
"copper": "#da8a67",
"antique brass": "#c88a65",
"black": "#000000",
"charcoal gray": "#736a62",
"gray": "#8b8680",
"blue-gray": "#c8c8cd",
"radical red": "#ff355e",
"wild watermelon": "#fd5b78",
"outrageous orange": "#ff6037",
"atomic tangerine": "#ff9966",
"neon carrot": "#ff9933",
"sunglow": "#ffcc33",
"laser lemon": "#ffff66",
"unmellow yellow": "#ffff66",
"electric lime": "#ccff00",
"screamin' green": "#66ff66",
"magic mint": "#aaf0d1",
"blizzard blue": "#50bfe6",
"shocking pink": "#ff6eff",
"razzle dazzle rose": "#ee34d2",
"hot magenta": "#ff00cc",
"purple pizzazz": "#ff00cc",
"sizzling red": "#ff3855",
"red salsa": "#fd3a4a",
"tart orange": "#fb4d46",
"orange soda": "#fa5b3d",
"bright yellow": "#ffaa1d",
"yellow sunshine": "#fff700",
"slimy green": "#299617",
"green lizard": "#a7f432",
"denim blue": "#2243b6",
"blue jeans": "#5dadec",
"plump purple": "#5946b2",
"purple plum": "#9c51b6",
"sweet brown": "#a83731",
"brown sugar": "#af6e4d",
"eerie black": "#1b1b1b",
"black shadows": "#bfafb2",
"fiery rose": "#ff5470",
"sizzling sunrise": "#ffdb00",
"heat wave": "#ff7a00",
"lemon glacier": "#fdff00",
"spring frost": "#87ff2a",
"absolute zero": "#0048ba",
"winter sky": "#ff007c",
"frostbite": "#e936a7",
"alloy orange": "#c46210",
"b'dazzled blue": "#2e5894",
"big dip o' ruby": "#9c2542",
"bittersweet shimmer": "#bf4f51",
"blast off bronze": "#a57164",
"cyber grape": "#58427c",
"deep space sparkle": "#4a646c",
"gold fusion": "#85754e",
"illuminating emerald": "#319177",
"metallic seaweed": "#0a7e8c",
"metallic sunburst": "#9c7c38",
"razzmic berry": "#8d4e85",
"sheen green": "#8fd400",
"shimmering blush": "#d98695",
"sonic silver": "#757575",
"steel blue": "#0081ab",
"aztec gold": "#c39953",
"burnished brown": "#a17a74",
"cerulean frost": "#6d9bc3",
"cinnamon satin": "#cd607e",
"copper penny": "#ad6f69",
"cosmic cobalt": "#2e2d88",
"glossy grape": "#ab92b3",
"granite gray": "#676767",
"green sheen": "#6eaea1",
"lilac luster": "#ae98aa",
"misty moss": "#bbb477",
"mystic maroon": "#ad4379",
"pearly purple": "#b768a2",
"pewter blue": "#8ba8b7",
"polished pine": "#5da493",
"quick silver": "#a6a6a6",
"rose dust": "#9e5e6f",
"rusty red": "#da2c43",
"shadow blue": "#778ba5",
"shiny shamrock": "#5fa778",
"steel teal": "#5f8a8b",
"sugar plum": "#914e75",
"twilight lavender": "#8a496b",
"wintergreen dream": "#56887d",
"baby powder": "#fefefa",
"banana": "#ffd12a",
"blueberry": "#4f86f7",
"bubble gum": "#ffd3f8",
"cedar chest": "#c95a49",
"cherry": "#da2647",
"chocolate": "#bd8260",
"coconut": "#fefefe",
"daffodil": "#ffff31",
"eucalyptus": "#44d7a8",
"fresh air": "#a6e7ff",
"grape": "#6f2da8",
"jelly bean": "#da614e",
"leather jacket": "#253529",
"lemon": "#ffff38",
"licorice": "#1a1110",
"lilac": "#db91ef",
"lime": "#b2f302",
"lumber": "#ffe4cd",
"new car": "#214fc6",
"orange": "#ff8866",
"peach": "#ffd0b9",
"pine": "#45a27d",
"rose": "#ff5050",
"shampoo": "#ffcff1",
"smoke": "#738276",
"soap": "#cec8ef",
"strawberry": "#fc5a8d",
"tulip": "#ff878d",
"amethyst": "#64609a",
"citrine": "#933709",
"emerald": "#14a989",
"jade": "#469a84",
"jasper": "#d05340",
"lapis lazuli": "#436cb9",
"malachite": "#469496",
"moonstone": "#3aa8c1",
"onyx": "#353839",
"peridot": "#abad48",
"pink pearl": "#b07080",
"rose quartz": "#bd559c",
"ruby": "#aa4069",
"sapphire": "#2d5da1",
"smokey topaz": "#832a0d",
"tiger's eye": "#b56917",
"baseball mitt": "#e97451",
"bubble bath": "#fc80a5",
"earthworm": "#c62d42",
"flower shop": "#c9a0dc",
"fresh air": "#76d7ea",
"grandma's perfume": "#ff8833",
"koala tree": "#29ab87",
"pet shop": "#af593e",
"pine tree": "#01786f",
"saw dust": "#ffcba4",
"sharpening pencils": "#fcd667",
"smell the roses": "#ed0a3f",
"sunny day": "#fbe870",
"wash the dog": "#fed85d",
"alien armpit": "#84de02",
"big foot feet": "#e88e5a",
"booger buster": "#dde26a",
"dingy dungeon": "#c53151",
"gargoyle gas": "#ffdf46",
"giant's club": "#b05c52",
"magic potion": "#ff4466",
"mummy's tomb": "#828e84",
"ogre odor": "#fd5240",
"pixie powder": "#391285",
"princess perfume": "#ff85cf",
"sasquatch socks": "#ff4681",
"sea serpent": "#4bc7cf",
"smashed pumpkin": "#ff6d3a",
"sunburnt cyclops": "#ff404c",
"winter wizard": "#a0e6f"
}
class scale_color_crayon(scale):
"""
Use crayon colors in your plots
Examples
--------
>>> from ggplot import *
>>> import pandas as pd
>>> df = pd.DataFrame(dict(x=range(3), y=range(3), crayon=['sunset orange', 'inchworm', 'cadet blue']))
>>> p = ggplot(aes(x='x', y='y', color='crayon'), data=df)
>>> p += geom_point(size=250)
>>> print(p + scale_color_crayon())
"""
VALID_SCALES = []
def __radd__(self, gg):
colors = sorted(gg.data[gg._aes['color']].unique())
gg.manual_color_list = []
for color in colors:
new_color = CRAYON_COLORS.get(color.lower())
if not new_color:
raise Exception("Color not found: %s" % color)
gg.manual_color_list.append(new_color)
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_color_crayon.py",
"copies": "1",
"size": "10469",
"license": "bsd-2-clause",
"hash": -7495222878609140000,
"line_mean": 28.9114285714,
"line_max": 107,
"alpha_frac": 0.5402617251,
"autogenerated": false,
"ratio": 2.5256936067551266,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8565823056722852,
"avg_score": 0.000026455026455026453,
"num_lines": 350
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_color_funfetti(scale):
"""
Make your plots look like funfetti
Parameters
----------
type: string
One of confetti or sprinkles (defaults to sprinkles)
Examples
--------
>>> from ggplot import *
>>> p = ggplot(aes(x='carat', y='price', colour='clarity'), data=diamonds)
>>> p += geom_point()
>>> print(p + scale_color_funfetti())
"""
VALID_SCALES = ['type', 'palette']
def __radd__(self, gg):
color_maps = {
"confetti": [
"#a864fd",
"#29cdff",
"#78ff44",
"#ff718d",
"#fdff6a"
],
"sprinkles": [
"#F8909F",
"#C5DE9C",
"#8BF3EF",
"#F9AA50",
"#EDE5D9"
]
}
# try:
# color_col = gg._aes.data.get('color', gg._aes.data.get('fill'))
# n_colors = max(gg.data[color_col].nunique(), 3)
# except:
# n_colors = 5
gg.manual_color_list = color_maps.get(self.type, color_maps['sprinkles'])
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_color_funfetti.py",
"copies": "1",
"size": "1317",
"license": "bsd-2-clause",
"hash": -8669295723441150000,
"line_mean": 25.34,
"line_max": 81,
"alpha_frac": 0.4608959757,
"autogenerated": false,
"ratio": 3.5885558583106265,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9548232321815504,
"avg_score": 0.00024390243902439024,
"num_lines": 50
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_color_manual(scale):
"""
Specify a list of colors to use manually.
Parameters
----------
values: list of colors/strings
List of colors with length greater than or equal to the number
of unique discrete items to which you want to apply color.
Examples
--------
>>> from ggplot import *
>>> color_list = ['#FFAAAA', '#ff5b00', '#c760ff', '#f43605', '#00FF00',
... '#0000FF', '#4c9085']
>>> lng = pd.melt(meat, ['date'])
>>> gg = ggplot(lng, aes('date', 'value', color='variable')) + \\
... geom_point()
>>> print(gg + scale_colour_manual(values=color_list) + \\
... ggtitle('With manual colors'))
>>> print(gg + ggtitle('Without manual colors'))
"""
VALID_SCALES = ['values']
def __radd__(self, gg):
if not (self.values is None):
n_colors_needed = gg.data[gg._aes.data['color']].nunique()
n_colors_provided = len(self.values)
if n_colors_provided < n_colors_needed:
msg = 'Error: Insufficient values in manual scale. {0} needed but only {1} provided.'
raise Exception(msg.format(n_colors_needed, n_colors_provided))
gg.manual_color_list = self.values[:n_colors_needed]
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_color_manual.py",
"copies": "1",
"size": "1463",
"license": "bsd-2-clause",
"hash": -8635842462810478000,
"line_mean": 37.5,
"line_max": 101,
"alpha_frac": 0.5755297334,
"autogenerated": false,
"ratio": 3.713197969543147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9782738465142764,
"avg_score": 0.0011978475600766623,
"num_lines": 38
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_colour_manual(scale):
"""
Specify a list of colors to use manually.
Parameters
----------
values : list of colors/strings
List of colors with length greater than or equal to the number
of unique discrete items to which you want to apply color.
Examples
--------
>>> from ggplot import *
>>> color_list = ['#FFAAAA', '#ff5b00', '#c760ff', '#f43605', '#00FF00',
... '#0000FF', '#4c9085']
>>> lng = pd.melt(meat, ['date'])
>>> gg = ggplot(lng, aes('date', 'value', color='variable')) + \\
... geom_point()
>>> print(gg + scale_colour_manual(values=color_list) + \\
... ggtitle('With manual colors'))
>>> print(gg + ggtitle('Without manual colors'))
"""
VALID_SCALES = ['values']
def __radd__(self, gg):
gg = deepcopy(gg)
if not (self.values is None):
n_colors_needed = gg.data[gg.aesthetics['color']].nunique()
n_colors_provided = len(self.values)
if n_colors_provided < n_colors_needed:
msg = 'Error: Insufficient values in manual scale. {0} needed but only {1} provided.'
raise Exception(msg.format(n_colors_needed, n_colors_provided))
gg.manual_color_list = self.values[:n_colors_needed]
return gg
| {
"repo_name": "udacity/ggplot",
"path": "ggplot/scales/scale_colour_manual.py",
"copies": "12",
"size": "1505",
"license": "bsd-2-clause",
"hash": -3758991458376241700,
"line_mean": 36.625,
"line_max": 101,
"alpha_frac": 0.5700996678,
"autogenerated": false,
"ratio": 3.7344913151364763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.033915732959850606,
"num_lines": 40
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_fill_manual(scale):
"""
Specify a list of colors to use manually.
Parameters
----------
values: list of colors/strings
List of colors with length greater than or equal to the number
of unique discrete items to which you want to apply color.
Examples
--------
>>> from ggplot import *
>>> color_list = ['#FFAAAA', '#ff5b00', '#c760ff', '#f43605', '#00FF00',
... '#0000FF', '#4c9085']
>>> lng = pd.melt(meat, ['date'])
>>> gg = ggplot(lng, aes('date', fill='variable')) + \\
... geom_bar()
>>> print(gg + scale_fill_manual(values=color_list) + \\
... ggtitle('With manual colors'))
>>> print(gg + ggtitle('Without manual colors'))
"""
VALID_SCALES = ['values']
def __radd__(self, gg):
if not (self.values is None):
n_colors_needed = gg.data[gg._aes.data['fill']].nunique()
n_colors_provided = len(self.values)
if n_colors_provided < n_colors_needed:
msg = 'Error: Insufficient values in manual scale. {0} needed but only {1} provided.'
raise Exception(msg.format(n_colors_needed, n_colors_provided))
gg.manual_fill_list = self.values[:n_colors_needed]
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_fill_manual.py",
"copies": "1",
"size": "1446",
"license": "bsd-2-clause",
"hash": 7794467174568878000,
"line_mean": 37.0526315789,
"line_max": 101,
"alpha_frac": 0.5733056708,
"autogenerated": false,
"ratio": 3.6982097186700766,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4771515389470077,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .scale import scale
from copy import deepcopy
class scale_y_log(scale):
"""
Make y axis log based
Parameters
----------
base:
log base to use (defaults to 10)
Examples
--------
>>> ggplot(diamonds, aes(x='price')) + geom_histogram() + scale_y_log()
>>> ggplot(diamonds, aes(x='price')) + geom_histogram() + scale_y_log(base=2)
"""
def __init__(self, base=10):
self.base = base
def __radd__(self, gg):
gg = deepcopy(gg)
gg.scale_y_log = self.base
return gg
class scale_x_log(scale):
"""
Make x axis log based
Parameters
----------
base:
log base to use (defaults to 10)
Examples
--------
>>> ggplot(diamonds, aes(x='price', y='carat')) + geom_point() + scale_x_log()
>>> ggplot(diamonds, aes(x='price', y='carat')) + geom_point() + scale_x_log(base=2)
"""
def __init__(self, base=10):
self.base = base
def __radd__(self, gg):
gg = deepcopy(gg)
gg.scale_x_log = self.base
return gg
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/scales/scale_log.py",
"copies": "1",
"size": "1190",
"license": "bsd-2-clause",
"hash": 4459924831120872000,
"line_mean": 22.3333333333,
"line_max": 88,
"alpha_frac": 0.5327731092,
"autogenerated": false,
"ratio": 3.278236914600551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43110100238005505,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six import string_types
import numpy as np
import scipy.stats
import pandas as pd
from ggplot.utils import make_iterable_ntimes
from .stat import stat
def bootstrap_statistics(series, statistic, n_samples=1000, confidence_interval=0.95):
"""
Default parameters taken from
R's Hmisc smean.cl.boot
"""
alpha = 1 - confidence_interval
inds = np.random.randint(0, len(series), size=(n_samples, len(series)))
samples = series.values[inds]
means = np.sort(statistic(samples, axis=1))
return pd.Series({'ymin': means[int((alpha/2)*n_samples)],
'ymax': means[int((1-alpha/2)*n_samples)],
'y': statistic(series)})
def mean_cl_boot(series, n_samples=1000, confidence_interval=0.95):
return bootstrap_statistics(series, np.mean, n_samples=n_samples, confidence_interval=confidence_interval)
def mean_cl_normal(series, confidence_interval=0.95):
"""
Adapted from http://stackoverflow.com/a/15034143
"""
a = np.asarray(series)
m = np.mean(a)
se = scipy.stats.sem(a)
h = se * scipy.stats.t._ppf((1+confidence_interval)/2, len(a)-1)
return pd.Series({'y': m,
'ymin': m-h,
'ymax': m+h})
def mean_sdl(series, mult=2):
m = series.mean()
s = series.std()
return pd.Series({'y': m,
'ymin': m-mult*s,
'ymax': m+mult*s})
def median_hilow(series, confidence_interval=0.95):
tail = (1 - confidence_interval) / 2
return pd.Series({'y': np.median(series),
'ymin': np.percentile(series, 100 * tail),
'ymax': np.percentile(series, 100 * (1 - tail))})
def mean_se(series, mult=1):
m = np.mean(series)
se = mult * np.sqrt(np.var(series) / len(series))
return pd.Series({'y': m,
'ymin': m-se,
'ymax': m+se})
function_dict = {'mean_cl_boot': mean_cl_boot,
'mean_cl_normal': mean_cl_normal,
'mean_sdl': mean_sdl,
'median_hilow': median_hilow,
'mean_se': mean_se}
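# Hedged usage sketch, not part of the original module: each summary helper
# above maps a pandas Series to a Series indexed by 'y', 'ymin' and 'ymax'.
# The sample values are illustrative only.
def example_summaries():
    series = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
    normal_ci = mean_cl_normal(series)             # t-based confidence interval
    boot_ci = mean_cl_boot(series, n_samples=200)  # bootstrap interval
    spread = mean_sdl(series, mult=2)              # mean +/- 2 standard deviations
    return normal_ci, boot_ci, spread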
def combined_fun_data(series, fun_y, fun_ymin, fun_ymax):
d = {}
if fun_y:
d['y'] = fun_y(series)
if fun_ymin:
d['ymin'] = fun_ymin(series)
if fun_ymax:
d['ymax'] = fun_ymax(series)
return pd.Series(d)
class stat_summary(stat):
"""
Calculate summary statistics depending on x, usually by
calculating three values ymin, y and ymax for each value of x.
Parameters
----------
fun_data : string or function
One of `"mean_cl_boot"`, `"mean_cl_normal"`, `"mean_sdl"`, `"median_hilow"` or
any function that takes a pandas series and returns a series with three
rows indexed as `y`, `ymin` and `ymax`. Defaults to `"mean_cl_boot"`.
fun_y, fun_ymin, fun_ymax : function
Any function that takes a pandas series and returns a value
Notes
-----
If any of `fun_y`, `fun_ymin` or `fun_ymax` are provided, the value of
`fun_data` will be ignored.
As R's syntax `fun.data = some_function` is not valid in python, here
`fun_data = some_function` is used for now.
Examples
--------
General usage:
.. plot::
:include-source:
from ggplot import *
ggplot(aes(x='cut', y='carat'), data=diamonds) \\
+ stat_summary(fun_data = 'mean_cl_boot')
Provide own function:
.. plot::
:include-source:
import numpy as np
from ggplot import *
def median_quantile(series):
return pd.Series({'y': np.median(series),
'ymin': np.percentile(series, 5),
'ymax': np.percentile(series, 95)})
ggplot(aes(x='cut', y='carat'), data=diamonds) \\
+ stat_summary(fun_data = median_quantile)
Provide different functions for y, ymin and ymax:
.. plot::
:include-source:
import numpy as np
from ggplot import *
ggplot(aes(x='cut', y='carat'), data=diamonds) \\
+ stat_summary(fun_y = np.median, fun_ymin=np.min, fun_ymax=np.max)
"""
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'pointrange', 'position': 'identity', 'fun_data': 'mean_cl_boot',
'fun_y': None, 'fun_ymin': None, 'fun_ymax': None}
CREATES = {'ymin', 'ymax'}
def _calculate(self, data):
if self.params['fun_y'] or self.params['fun_ymin'] or self.params['fun_ymax']:
fun_data = lambda s: combined_fun_data(s, self.params['fun_y'], self.params['fun_ymin'], self.params['fun_ymax'])
elif isinstance(self.params['fun_data'], string_types):
fun_data = function_dict[self.params['fun_data']]
else:
fun_data = self.params['fun_data']
new_data = data.groupby('x').apply(lambda df: fun_data(df['y'])).reset_index()
data.pop('x')
data.pop('y')
# Copy the other aesthetics into the new dataframe
n = len(new_data.x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
| {
"repo_name": "kmather73/ggplot",
"path": "ggplot/stats/stat_summary.py",
"copies": "12",
"size": "5323",
"license": "bsd-2-clause",
"hash": -7371929121938018000,
"line_mean": 29.9476744186,
"line_max": 125,
"alpha_frac": 0.5650948713,
"autogenerated": false,
"ratio": 3.472276581865623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from sqlalchemy import MetaData, Table, Column, types, create_engine, select
from .base import BaseBackend
class SQLBackend(BaseBackend):
def __init__(self, url, table_name='gimlet_channels', **engine_kwargs):
meta = MetaData(bind=create_engine(url, **engine_kwargs))
self.table = Table(table_name, meta,
Column('id', types.Integer, primary_key=True),
Column('key', types.CHAR(32), nullable=False,
unique=True),
Column('data', types.LargeBinary, nullable=False))
self.table.create(checkfirst=True)
def __setitem__(self, key, value):
table = self.table
key_col = table.c.key
raw = self.serialize(value)
# Check if this key exists with a SELECT FOR UPDATE, to protect
# against a race with other concurrent writers of this key.
r = table.count(key_col == key, for_update=True).scalar()
if r:
# If it exists, use an UPDATE.
table.update().values(data=raw).where(key_col == key).execute()
else:
# Otherwise INSERT.
table.insert().values(key=key, data=raw).execute()
def __getitem__(self, key):
raw = select([self.table.c.data], self.table.c.key == key).scalar()
if raw:
return self.deserialize(raw)
else:
raise KeyError('key %r not found' % key)
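# Hedged usage sketch, not part of the original module: SQLBackend acts as a
# dict keyed by 32-character session keys; serialize()/deserialize() are
# inherited from BaseBackend. The in-memory SQLite URL and the stored value
# are illustrative assumptions.
def example_roundtrip():
    backend = SQLBackend('sqlite:///:memory:')
    key = 'a' * 32                    # keys are CHAR(32) per the schema above
    backend[key] = {'user_id': 42}    # INSERT on first write, UPDATE afterwards
    return backend[key]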
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/backends/sql.py",
"copies": "1",
"size": "1568",
"license": "mit",
"hash": 8681673147649421000,
"line_mean": 40.2631578947,
"line_max": 77,
"alpha_frac": 0.5727040816,
"autogenerated": false,
"ratio": 4.126315789473685,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5199019871073685,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from ..dotdict import DotDict
class TestDotDict(TestCase):
def test_get(self):
dd = DotDict({'a': 42,
'b': 'hello'})
self.assertEqual(dd['b'], 'hello')
self.assertEqual(dd.b, 'hello')
def test_recursive(self):
dd = DotDict({'a': 42,
'b': {'one': 1,
'two': 2,
'three': 3}})
self.assertEqual(dd['b']['two'], 2)
self.assertEqual(dd.b.two, 2)
def test_recursive_list(self):
dd = DotDict({
'organization': 'Avengers',
'members': [
{'id': 1, 'name': 'Bruce Banner'},
{'id': 2, 'name': 'Tony Stark'},
{'id': 3, 'name': 'Steve Rogers'},
{'id': 4, 'name': 'Natasha Romanoff'}
]
})
self.assertEqual(dd.members[1].name, 'Tony Stark')
def test_set(self):
dd = DotDict({'a': 4,
'b': 9})
dd.c = 16
self.assertEqual(dd.c, 16)
self.assertEqual(dd['c'], 16)
def test_del(self):
dd = DotDict({'a': 123,
'b': 456})
del dd.b
self.assertEqual(dict(dd), {'a': 123})
def test_repr(self):
dd = DotDict({'a': 1})
self.assertIn(repr(dd), ["<DotDict({'a': 1})>",
"<DotDict({u'a': 1})>"])
| {
"repo_name": "storborg/pyramid_es",
"path": "pyramid_es/tests/test_dotdict.py",
"copies": "1",
"size": "1553",
"license": "mit",
"hash": 8909887035107215000,
"line_mean": 29.4509803922,
"line_max": 66,
"alpha_frac": 0.4365743722,
"autogenerated": false,
"ratio": 3.715311004784689,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9651885376984688,
"avg_score": 0,
"num_lines": 51
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from gimlet.backends.sql import SQLBackend
from gimlet.util import asbool, parse_settings
class TestUtil(TestCase):
def test_asbool_true(self):
for val in ('T', 'trUe', 'y', 'yes', 'on', '1', True, 1):
self.assertTrue(asbool(val))
def test_asbool_false(self):
for val in ('a', 'f', 'false', 'no', False, 0, None):
self.assertFalse(asbool(val))
def test_parse_settings(self):
settings = {
'gimlet.backend': 'sql',
'gimlet.backend.url': 'sqlite:///:memory:',
'gimlet.secret': 'super-secret',
'gimlet.permanent': 'true',
'non-gimlet-setting': None,
}
options = parse_settings(settings)
self.assertNotIn('non-gimlet-setting', options)
self.assertEqual(options['permanent'], True)
self.assertIsInstance(options['backend'], SQLBackend)
def test_parse_settings_absolute_backend(self):
settings = {
'backend': 'gimlet.backends.sql',
'backend.url': 'sqlite:///:memory:',
'secret': 'super-secret',
}
options = parse_settings(settings, prefix='')
self.assertIsInstance(options['backend'], SQLBackend)
def test_parse_settings_None_backend(self):
settings = {
'backend': None,
'secret': 'super-secret',
}
parse_settings(settings, prefix='')
def test_parse_settings_bad_backend(self):
settings = {
'backend': object,
'secret': 'super-secret',
}
self.assertRaises(ValueError, parse_settings, settings, prefix='')
def test_parse_settings_unknown_backend(self):
settings = {
'backend': 'unknown_backend',
'secret': 'super-secret',
}
self.assertRaises(ImportError, parse_settings, settings, prefix='')
def test_parse_settings_no_secret(self):
self.assertRaises(ValueError, parse_settings, {})
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/tests/test_util.py",
"copies": "1",
"size": "2125",
"license": "mit",
"hash": 3255154723701884000,
"line_mean": 32.7301587302,
"line_max": 75,
"alpha_frac": 0.5807058824,
"autogenerated": false,
"ratio": 4.009433962264151,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.509013984466415,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from ..mixin import ElasticMixin, ESMapping, ESString, ESProp
def rgb_to_hex(rgb):
return ('#' + ('%02x' * 3)) % rgb
class ESColor(ESProp):
def __init__(self, name, *args, **kwargs):
ESProp.__init__(self, name, *args, filter=rgb_to_hex, **kwargs)
class Thing(object):
def __init__(self, id, foreground, child=None):
self.id = id
self.foreground = foreground
self.child = child
class TestMixin(TestCase):
def test_custom_prop(self):
mapping = ESColor('foreground')
obj = Thing(id=42, foreground=(60, 40, 30))
doc = mapping(obj)
self.assertEqual(doc, '#3c281e')
def test_elastic_mixin_no_mapping(self):
class Foo(ElasticMixin):
pass
with self.assertRaises(NotImplementedError):
Foo.elastic_mapping()
def test_nested_mappings(self):
mapping = ESMapping(
analyzer='lowercase',
properties=ESMapping(
ESColor('foreground'),
child=ESMapping(
analyzer='lowercase',
properties=ESMapping(
ESColor('foreground')))))
thing1 = Thing(id=1,
foreground=(40, 20, 27))
thing2 = Thing(id=2,
foreground=(37, 88, 19),
child=thing1)
doc = mapping(thing2)
self.assertEqual(doc['_id'], 2)
self.assertEqual(doc['child']['_id'], 1)
def test_nested_mappings_dict(self):
mapping = ESMapping(
analyzer='lowercase',
properties=ESMapping(
ESColor('foreground'),
child=dict(
analyzer='lowercase',
properties=ESMapping(
ESColor('foreground')))))
thing1 = Thing(id=1,
foreground=(40, 20, 27))
thing2 = Thing(id=2,
foreground=(37, 88, 19),
child=thing1)
doc = mapping(thing2)
self.assertEqual(doc['_id'], 2)
self.assertEqual(doc['child']['_id'], 1)
def test_contains(self):
mapping = ESMapping(
ESString("name"),
ESString("body"))
self.assertIn('name', mapping)
self.assertNotIn('foo', mapping)
def test_getitem(self):
name_field = ESString('name', analyzer='lowercase')
mapping = ESMapping(
name_field,
ESString("body"))
self.assertEqual(mapping['name'], name_field)
self.assertEqual(mapping['name']['analyzer'], 'lowercase')
def test_setitem(self):
name_field = ESString('foo')
name_field['analyzer'] = 'lowercase'
self.assertEqual(name_field['analyzer'], 'lowercase')
def test_update(self):
mapping_base = ESMapping(
ESString('name'),
ESString('body'),
ESString('color'))
mapping_new = ESMapping(
ESString('name', analyzer='lowercase'),
ESString('foo'))
self.assertNotIn('analyzer', mapping_base['name'])
mapping_base.update(mapping_new)
self.assertEqual(mapping_base['name']['analyzer'], 'lowercase')
| {
"repo_name": "storborg/pyramid_es",
"path": "pyramid_es/tests/test_mixin.py",
"copies": "1",
"size": "3384",
"license": "mit",
"hash": 2345000204255967700,
"line_mean": 29.2142857143,
"line_max": 71,
"alpha_frac": 0.5345744681,
"autogenerated": false,
"ratio": 4.316326530612245,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5350900998712245,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from unittest import TestCase
from webob import Request, Response
import webtest
from gimlet.factories import session_factory_factory
class TestSession(TestCase):
def _make_session(self, secret='secret', **options):
request = Request.blank('/')
return session_factory_factory(secret, **options)(request)
def test_session(self):
sess = self._make_session()
sess['a'] = 'a'
self.assertIn('a', sess)
self.assertIn('a', sess.channels['nonperm'])
def test_session_nonperm(self):
sess = self._make_session()
sess.set('a', 'a', permanent=False)
self.assertIn('a', sess.channels['nonperm'])
self.assertNotIn('a', sess.channels['perm'])
def test_invalidate(self):
sess = self._make_session()
sess['a'] = 'a'
self.assertIn('a', sess)
sess.invalidate()
self.assertNotIn('a', sess)
def test_flash(self):
sess = self._make_session()
self.assertEqual(sess.peek_flash(), [])
sess.flash('abc')
sess.flash('abc')
self.assertEqual(sess.peek_flash(), ['abc', 'abc'])
self.assertEqual(sess.pop_flash(), ['abc', 'abc'])
self.assertEqual(sess.peek_flash(), [])
sess.flash('xyz', allow_duplicate=False)
sess.flash('xyz', allow_duplicate=False)
self.assertEqual(sess.peek_flash(), ['xyz'])
def test_csrf(self):
sess = self._make_session()
self.assertNotIn('_csrft_', sess)
token = sess.get_csrf_token()
self.assertIn('_csrft_', sess)
self.assertIsInstance(token, str)
self.assertEqual(token, sess.get_csrf_token())
class TestRequest(webtest.TestRequest):
@property
def session(self):
return self.environ['gimlet.session']
class TestApp(webtest.TestApp):
RequestClass = TestRequest
class App(object):
def __init__(self):
self.session_factory = session_factory_factory('secret')
def __call__(self, environ, start_response):
request = TestRequest(environ)
environ['gimlet.session'] = self.session_factory(request)
view_name = request.path_info_pop()
view = getattr(self, view_name)
response = view(request)
request.session.write_callback(request, response)
return response(environ, start_response)
def get(self, request):
return Response('get')
def set(self, request):
request.session['a'] = 'a'
return Response('set')
def invalidate(self, request):
request.session.invalidate()
return Response('invalidate')
def mutate_set(self, request):
request.session['b'] = {'bar': 42}
return Response('mutate_set')
def mutate_get(self, request):
s = ','.join(['%s:%s' % (k, v)
for k, v in sorted(request.session['b'].items())])
return Response(s)
def mutate_nosave(self, request):
request.session['b']['foo'] = 123
return Response('mutate_nosave')
def mutate_save(self, request):
request.session['b']['foo'] = 123
request.session.save()
return Response('mutate_save')
def mangle_cookie(self, request):
resp = Response('mangle_cookie')
resp.set_cookie('gimlet-p', request.cookies['gimlet-p'].lower())
return resp
class TestSession_Functional(TestCase):
def setUp(self):
self.app = TestApp(App())
def test_invalidate(self):
# First request has no cookies; this sets them
res = self.app.get('/set')
self.assertEqual(res.request.cookies, {})
self.assertIn('Set-Cookie', res.headers)
# Next request should contain cookies
res = self.app.get('/get')
self.assert_(res.request.cookies)
self.assertIn('gimlet-p', res.request.cookies)
old_cookie_value = res.request.cookies['gimlet-p']
self.assert_(old_cookie_value)
# Invalidation should empty the session and set a new cookie
res = self.app.get('/invalidate')
self.assertIn('Set-Cookie', res.headers)
self.assertEqual(res.request.session, {})
res = self.app.get('/get')
self.assertIn('gimlet-p', res.request.cookies)
new_cookie_value = res.request.cookies['gimlet-p']
self.assert_(new_cookie_value)
self.assertNotEqual(new_cookie_value, old_cookie_value)
def test_bad_signature(self):
# First request has no cookies; this sets them
res = self.app.get('/set')
self.assertEqual(res.request.cookies, {})
self.assertIn('Set-Cookie', res.headers)
# Mangle cookie
orig_cookie = self.app.cookies['gimlet-p']
self.app.get('/mangle_cookie')
mangled_cookie = self.app.cookies['gimlet-p']
self.assertEqual(mangled_cookie, orig_cookie.lower())
# Next request should succeed and then set a new cookie
self.app.get('/get')
self.assertIn('gimlet-p', self.app.cookies)
self.assertNotEqual(self.app.cookies['gimlet-p'], orig_cookie)
self.assertNotEqual(self.app.cookies['gimlet-p'], mangled_cookie)
def test_mutate(self):
# First set a key.
res = self.app.get('/mutate_set')
self.assertIn('Set-Cookie', res.headers)
# Check it
res = self.app.get('/mutate_get')
self.assertEqual(res.body.decode('utf8'), 'bar:42')
# Update the key without saving
res = self.app.get('/mutate_nosave')
res.mustcontain('mutate_nosave')
# Check again, it shouldn't be saved
res = self.app.get('/mutate_get')
self.assertEqual(res.body.decode('utf8'), 'bar:42')
# Now update the key with saving
res = self.app.get('/mutate_save')
res.mustcontain('mutate_save')
# Check again, it should be saved
res = self.app.get('/mutate_get')
self.assertEqual(res.body.decode('utf8'), 'bar:42,foo:123')
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/tests/test_session.py",
"copies": "1",
"size": "6083",
"license": "mit",
"hash": -5396812966871902000,
"line_mean": 32.9832402235,
"line_max": 73,
"alpha_frac": 0.6072661516,
"autogenerated": false,
"ratio": 3.7712337259764412,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4878499877576441,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..utils import date_breaks, date_format
from .scale import scale
from copy import deepcopy
import six
class scale_x_date(scale):
"""
Position scale, date
Parameters
----------
breaks : string / list of breaks
1) a string specifying the width between breaks.
2) the result of a valid call to `date_breaks`
3) a vector of breaks (TODO: not implemented yet!)
Examples
--------
>>> # 1) manually pass in breaks=date_breaks()
>>> print(ggplot(meat, aes('date','beef')) + \\
... geom_line() + \\
... scale_x_date(breaks=date_breaks('10 years'),
... labels=date_format('%B %-d, %Y')))
>>> # 2) or breaks as just a string
>>> print(ggplot(meat, aes('date','beef')) + \\
... geom_line() + \\
... scale_x_date(breaks='10 years',
... labels=date_format('%B %-d, %Y')))
"""
VALID_SCALES = ['name', 'labels', 'limits', 'breaks', 'trans']
def __radd__(self, gg):
gg = deepcopy(gg)
if self.name:
gg.xlab = self.name.title()
if not (self.labels is None):
if isinstance(self.labels, six.string_types):
self.labels = date_format(self.labels)
gg.xtick_formatter = self.labels
if not (self.limits is None):
gg.xlimits = self.limits
if not (self.breaks is None):
if isinstance(self.breaks, six.string_types):
self.breaks = date_breaks(self.breaks)
gg.xmajor_locator = self.breaks
return gg
| {
"repo_name": "benslice/ggplot",
"path": "ggplot/scales/scale_x_date.py",
"copies": "12",
"size": "1687",
"license": "bsd-2-clause",
"hash": -1079421989471339500,
"line_mean": 34.1458333333,
"line_max": 66,
"alpha_frac": 0.5459395376,
"autogenerated": false,
"ratio": 3.740576496674058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000744047619047619,
"num_lines": 48
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# geoms
from .geom_abline import geom_abline
from .geom_area import geom_area
from .geom_bar import geom_bar
from .geom_blank import geom_blank
from .geom_boxplot import geom_boxplot
from .geom_density import geom_density
from .geom_dotplot import geom_dotplot
from .geom_histogram import geom_histogram
from .geom_hline import geom_hline
from .geom_jitter import geom_jitter
from .geom_line import geom_line
from .geom_linerange import geom_linerange
from .geom_now_its_art import geom_now_its_art
from .geom_path import geom_path
from .geom_point import geom_point
from .geom_pointrange import geom_pointrange
from .geom_rect import geom_rect
from .geom_smooth import geom_smooth
from .geom_step import geom_step
from .geom_text import geom_text
from .geom_tile import geom_tile
from .geom_vline import geom_vline
# misc
from .facet_grid import facet_grid
from .facet_wrap import facet_wrap
from .chart_components import *
__facet__ = ['facet_grid', 'facet_wrap']
__geoms__ = ['geom_abline', 'geom_area', 'geom_bar', 'geom_boxplot', 'geom_density',
'geom_dotplot', 'geom_blank', 'geom_linerange', 'geom_pointrange',
'geom_histogram', 'geom_hline', 'geom_jitter', 'geom_line', 'geom_linerange',
'geom_now_its_art', 'geom_path', 'geom_point', 'geom_pointrange', 'geom_rect',
'geom_step', 'geom_smooth', 'geom_text', 'geom_tile',
'geom_vline']
__components__ = ['ylab', 'xlab', 'ylim', 'xlim', 'labs', 'ggtitle']
__all__ = __geoms__ + __facet__ + __components__
__all__ = [str(u) for u in __all__]
| {
"repo_name": "andnovar/ggplot",
"path": "ggplot/geoms/__init__.py",
"copies": "12",
"size": "1672",
"license": "bsd-2-clause",
"hash": 6418944250273414000,
"line_mean": 39.7804878049,
"line_max": 91,
"alpha_frac": 0.6889952153,
"autogenerated": false,
"ratio": 3.2153846153846155,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9904379830684615,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import binascii
from six.moves import cPickle as pickle
from struct import Struct
from itsdangerous import Serializer, URLSafeSerializerMixin
class CookieSerializer(Serializer):
packer = Struct(str('16si'))
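# The '16si' layout is 16 raw bytes for the session id (the unhexlified
# 32-character hex id) followed by a native int holding the created
# timestamp; the pickled client data is appended after it.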
def __init__(self, secret, backend, crypter):
Serializer.__init__(self, secret)
self.backend = backend
self.crypter = crypter
def load_payload(self, payload):
"""
Convert a cookie into a SessionChannel instance.
"""
if self.crypter:
payload = self.crypter.decrypt(payload)
raw_id, created_timestamp = \
self.packer.unpack(payload[:self.packer.size])
client_data_pkl = payload[self.packer.size:]
id = binascii.hexlify(raw_id)
client_data = pickle.loads(client_data_pkl)
return id, created_timestamp, client_data
def dump_payload(self, channel):
"""
Convert a SessionChannel instance into a cookie payload by packing it into
a compact string.
"""
client_data_pkl = pickle.dumps(channel.client_data)
raw_id = binascii.unhexlify(channel.id)
payload = (self.packer.pack(raw_id, channel.created_timestamp) +
client_data_pkl)
if self.crypter:
payload = self.crypter.encrypt(payload)
return payload
class URLSafeCookieSerializer(URLSafeSerializerMixin, CookieSerializer):
pass
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/serializer.py",
"copies": "1",
"size": "1529",
"license": "mit",
"hash": -6069088264501437000,
"line_mean": 28.4038461538,
"line_max": 79,
"alpha_frac": 0.6376716808,
"autogenerated": false,
"ratio": 4.088235294117647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5225906974917647,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import binascii
class Crypter(object):
recommended = ("The recommended method for generating the key is "
"hexlify(os.urandom(32)).")
def __init__(self, key):
from Crypto.Cipher import AES
try:
key = binascii.unhexlify(key)
except TypeError:
raise ValueError("Encryption key must be 64 hex digits (32 bytes"
"). " + self.recommended)
if len(key) not in (16, 24, 32):
raise ValueError("Encryption key must be 16, 24, or 32 bytes. " +
self.recommended)
self.aes = AES.new(key, AES.MODE_ECB)
def pad(self, cleartext):
extra = 16 - (len(cleartext) % 16)
cleartext += (b'\0' * extra)
return cleartext
def unpad(self, cleartext):
return cleartext.rstrip(b'\0')
def encrypt(self, cleartext):
return self.aes.encrypt(self.pad(cleartext))
def decrypt(self, ciphertext):
return self.unpad(self.aes.decrypt(ciphertext))
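# Hedged usage sketch, not part of the original module: round-trips a short
# payload through the AES-ECB cipher using a key generated the recommended
# way. Requires the 'Crypto' (PyCrypto/PyCryptodome) package.
def example_crypter_roundtrip():
    import os
    key = binascii.hexlify(os.urandom(32))  # 64 hex digits, as recommended above
    crypter = Crypter(key)
    ciphertext = crypter.encrypt(b'hello')  # padded with NUL bytes to a 16-byte block
    return crypter.decrypt(ciphertext)      # -> b'hello'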
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/crypto.py",
"copies": "1",
"size": "1153",
"license": "mit",
"hash": 895385405003146900,
"line_mean": 30.1621621622,
"line_max": 77,
"alpha_frac": 0.5715524718,
"autogenerated": false,
"ratio": 4.103202846975089,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5174755318775088,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import enum
import math
import numpy
import logging
try: # pragma: no cover
from collections import abc
except ImportError: # pragma: no cover
import collections as abc
from python_utils import logger
from .utils import s
#: When removing empty areas, remove areas that are smaller than this
AREA_SIZE_THRESHOLD = 0
#: Vectors in a point
VECTORS = 3
#: Dimensions used in a vector
DIMENSIONS = 3
class Dimension(enum.IntEnum):
#: X index (for example, `mesh.v0[0][X]`)
X = 0
#: Y index (for example, `mesh.v0[0][Y]`)
Y = 1
#: Z index (for example, `mesh.v0[0][Z]`)
Z = 2
# For backwards compatibility, leave the original references
X = Dimension.X
Y = Dimension.Y
Z = Dimension.Z
class RemoveDuplicates(enum.Enum):
'''
Choose whether to remove no duplicates, leave only a single of the
duplicates or remove all duplicates (leaving holes).
'''
NONE = 0
SINGLE = 1
ALL = 2
@classmethod
def map(cls, value):
if value is True:
value = cls.SINGLE
elif value and value in cls:
pass
else:
value = cls.NONE
return value
def logged(class_):
# For some reason the Logged baseclass is not properly initiated on Linux
# systems while this works on OS X. Please let me know if you can tell me
# what silly mistake I made here
logger_name = logger.Logged._Logged__get_name(
__name__,
class_.__name__,
)
class_.logger = logging.getLogger(logger_name)
for key in dir(logger.Logged):
if not key.startswith('__'):
setattr(class_, key, getattr(class_, key))
return class_
@logged
class BaseMesh(logger.Logged, abc.Mapping):
'''
Mesh object with easy access to the vectors through v0, v1 and v2.
The normals, areas, min, max and units are calculated automatically.
:param numpy.array data: The data for this mesh
:param bool calculate_normals: Whether to calculate the normals
:param bool remove_empty_areas: Whether to remove triangles with 0 area
(due to rounding errors for example)
:ivar str name: Name of the solid, only exists in ASCII files
:ivar numpy.array data: Data as :func:`BaseMesh.dtype`
:ivar numpy.array points: All points (Nx9)
:ivar numpy.array normals: Normals for this mesh, calculated automatically
by default (Nx3)
:ivar numpy.array vectors: Vectors in the mesh (Nx3x3)
:ivar numpy.array attr: Attributes per vector (used by binary STL)
:ivar numpy.array x: Points on the X axis by vertex (Nx3)
:ivar numpy.array y: Points on the Y axis by vertex (Nx3)
:ivar numpy.array z: Points on the Z axis by vertex (Nx3)
:ivar numpy.array v0: Points in vector 0 (Nx3)
:ivar numpy.array v1: Points in vector 1 (Nx3)
:ivar numpy.array v2: Points in vector 2 (Nx3)
>>> data = numpy.zeros(10, dtype=BaseMesh.dtype)
>>> mesh = BaseMesh(data, remove_empty_areas=False)
>>> # Increment vector 0 item 0
>>> mesh.v0[0] += 1
>>> mesh.v1[0] += 2
>>> # Check item 0 (contains v0, v1 and v2)
>>> assert numpy.array_equal(
... mesh[0],
... numpy.array([1., 1., 1., 2., 2., 2., 0., 0., 0.]))
>>> assert numpy.array_equal(
... mesh.vectors[0],
... numpy.array([[1., 1., 1.],
... [2., 2., 2.],
... [0., 0., 0.]]))
>>> assert numpy.array_equal(
... mesh.v0[0],
... numpy.array([1., 1., 1.]))
>>> assert numpy.array_equal(
... mesh.points[0],
... numpy.array([1., 1., 1., 2., 2., 2., 0., 0., 0.]))
>>> assert numpy.array_equal(
... mesh.data[0],
... numpy.array((
... [0., 0., 0.],
... [[1., 1., 1.], [2., 2., 2.], [0., 0., 0.]],
... [0]),
... dtype=BaseMesh.dtype))
>>> assert numpy.array_equal(mesh.x[0], numpy.array([1., 2., 0.]))
>>> mesh[0] = 3
>>> assert numpy.array_equal(
... mesh[0],
... numpy.array([3., 3., 3., 3., 3., 3., 3., 3., 3.]))
>>> len(mesh) == len(list(mesh))
True
>>> (mesh.min_ < mesh.max_).all()
True
>>> mesh.update_normals()
>>> mesh.units.sum()
0.0
>>> mesh.v0[:] = mesh.v1[:] = mesh.v2[:] = 0
>>> mesh.points.sum()
0.0
>>> mesh.v0 = mesh.v1 = mesh.v2 = 0
>>> mesh.x = mesh.y = mesh.z = 0
>>> mesh.attr = 1
>>> (mesh.attr == 1).all()
True
>>> mesh.normals = 2
>>> (mesh.normals == 2).all()
True
>>> mesh.vectors = 3
>>> (mesh.vectors == 3).all()
True
>>> mesh.points = 4
>>> (mesh.points == 4).all()
True
'''
#: - normals: :func:`numpy.float32`, `(3, )`
#: - vectors: :func:`numpy.float32`, `(3, 3)`
#: - attr: :func:`numpy.uint16`, `(1, )`
dtype = numpy.dtype([
(s('normals'), numpy.float32, (3, )),
(s('vectors'), numpy.float32, (3, 3)),
(s('attr'), numpy.uint16, (1, )),
])
dtype = dtype.newbyteorder('<')  # Even on big-endian arches, use little-endian
def __init__(self, data, calculate_normals=True,
remove_empty_areas=False,
remove_duplicate_polygons=RemoveDuplicates.NONE,
name='', speedups=True, **kwargs):
super(BaseMesh, self).__init__(**kwargs)
self.speedups = speedups
if remove_empty_areas:
data = self.remove_empty_areas(data)
if RemoveDuplicates.map(remove_duplicate_polygons).value:
data = self.remove_duplicate_polygons(data,
remove_duplicate_polygons)
self.name = name
self.data = data
if calculate_normals:
self.update_normals()
@property
def attr(self):
return self.data['attr']
@attr.setter
def attr(self, value):
self.data['attr'] = value
@property
def normals(self):
return self.data['normals']
@normals.setter
def normals(self, value):
self.data['normals'] = value
@property
def vectors(self):
return self.data['vectors']
@vectors.setter
def vectors(self, value):
self.data['vectors'] = value
@property
def points(self):
return self.vectors.reshape(self.data.size, 9)
@points.setter
def points(self, value):
self.points[:] = value
@property
def v0(self):
return self.vectors[:, 0]
@v0.setter
def v0(self, value):
self.vectors[:, 0] = value
@property
def v1(self):
return self.vectors[:, 1]
@v1.setter
def v1(self, value):
self.vectors[:, 1] = value
@property
def v2(self):
return self.vectors[:, 2]
@v2.setter
def v2(self, value):
self.vectors[:, 2] = value
@property
def x(self):
return self.points[:, Dimension.X::3]
@x.setter
def x(self, value):
self.points[:, Dimension.X::3] = value
@property
def y(self):
return self.points[:, Dimension.Y::3]
@y.setter
def y(self, value):
self.points[:, Dimension.Y::3] = value
@property
def z(self):
return self.points[:, Dimension.Z::3]
@z.setter
def z(self, value):
self.points[:, Dimension.Z::3] = value
@classmethod
def remove_duplicate_polygons(cls, data, value=RemoveDuplicates.SINGLE):
value = RemoveDuplicates.map(value)
polygons = data['vectors'].sum(axis=1)
# Get a sorted list of indices
idx = numpy.lexsort(polygons.T)
# Get the indices of all different indices
diff = numpy.any(polygons[idx[1:]] != polygons[idx[:-1]], axis=1)
if value is RemoveDuplicates.SINGLE:
            # Only return the unique data; prepending True ensures the first
            # occurrence of every polygon is always kept
return data[numpy.sort(idx[numpy.concatenate(([True], diff))])]
elif value is RemoveDuplicates.ALL:
# We need to return both items of the shifted diff
diff_a = numpy.concatenate(([True], diff))
diff_b = numpy.concatenate((diff, [True]))
diff = numpy.concatenate((diff, [False]))
# Combine both unique lists
filtered_data = data[numpy.sort(idx[diff_a & diff_b])]
if len(filtered_data) <= len(data) / 2:
return data[numpy.sort(idx[diff_a])]
else:
return data[numpy.sort(idx[diff])]
else:
return data
@classmethod
def remove_empty_areas(cls, data):
vectors = data['vectors']
v0 = vectors[:, 0]
v1 = vectors[:, 1]
v2 = vectors[:, 2]
normals = numpy.cross(v1 - v0, v2 - v0)
squared_areas = (normals ** 2).sum(axis=1)
return data[squared_areas > AREA_SIZE_THRESHOLD ** 2]
def update_normals(self, update_areas=True):
'''Update the normals and areas for all points'''
normals = numpy.cross(self.v1 - self.v0, self.v2 - self.v0)
if update_areas:
self.update_areas(normals)
self.normals[:] = normals
def get_unit_normals(self):
normals = self.normals.copy()
normal = numpy.linalg.norm(normals, axis=1)
non_zero = normal > 0
if non_zero.any():
normals[non_zero] /= normal[non_zero][:, None]
return normals
def update_min(self):
self._min = self.vectors.min(axis=(0, 1))
def update_max(self):
self._max = self.vectors.max(axis=(0, 1))
def update_areas(self, normals=None):
if normals is None:
normals = numpy.cross(self.v1 - self.v0, self.v2 - self.v0)
areas = .5 * numpy.sqrt((normals ** 2).sum(axis=1))
self.areas = areas.reshape((areas.size, 1))
def check(self):
        '''Check whether the mesh is valid'''
return self.is_closed()
def is_closed(self): # pragma: no cover
"""Check the mesh is closed or not"""
if numpy.isclose(self.normals.sum(axis=0), 0, atol=1e-4).all():
return True
else:
self.warning('''
Your mesh is not closed, the mass methods will not function
correctly on this mesh. For more info:
https://github.com/WoLpH/numpy-stl/issues/69
'''.strip())
return False
def get_mass_properties(self):
'''
Evaluate and return a tuple with the following elements:
- the volume
- the position of the center of gravity (COG)
- the inertia matrix expressed at the COG
Documentation can be found here:
http://www.geometrictools.com/Documentation/PolyhedralMassProperties.pdf
'''
self.check()
def subexpression(x):
w0, w1, w2 = x[:, 0], x[:, 1], x[:, 2]
temp0 = w0 + w1
f1 = temp0 + w2
temp1 = w0 * w0
temp2 = temp1 + w1 * temp0
f2 = temp2 + w2 * f1
f3 = w0 * temp1 + w1 * temp2 + w2 * f2
g0 = f2 + w0 * (f1 + w0)
g1 = f2 + w1 * (f1 + w1)
g2 = f2 + w2 * (f1 + w2)
return f1, f2, f3, g0, g1, g2
x0, x1, x2 = self.x[:, 0], self.x[:, 1], self.x[:, 2]
y0, y1, y2 = self.y[:, 0], self.y[:, 1], self.y[:, 2]
z0, z1, z2 = self.z[:, 0], self.z[:, 1], self.z[:, 2]
a1, b1, c1 = x1 - x0, y1 - y0, z1 - z0
a2, b2, c2 = x2 - x0, y2 - y0, z2 - z0
d0, d1, d2 = b1 * c2 - b2 * c1, a2 * c1 - a1 * c2, a1 * b2 - a2 * b1
f1x, f2x, f3x, g0x, g1x, g2x = subexpression(self.x)
f1y, f2y, f3y, g0y, g1y, g2y = subexpression(self.y)
f1z, f2z, f3z, g0z, g1z, g2z = subexpression(self.z)
intg = numpy.zeros((10))
intg[0] = sum(d0 * f1x)
intg[1:4] = sum(d0 * f2x), sum(d1 * f2y), sum(d2 * f2z)
intg[4:7] = sum(d0 * f3x), sum(d1 * f3y), sum(d2 * f3z)
intg[7] = sum(d0 * (y0 * g0x + y1 * g1x + y2 * g2x))
intg[8] = sum(d1 * (z0 * g0y + z1 * g1y + z2 * g2y))
intg[9] = sum(d2 * (x0 * g0z + x1 * g1z + x2 * g2z))
intg /= numpy.array([6, 24, 24, 24, 60, 60, 60, 120, 120, 120])
volume = intg[0]
cog = intg[1:4] / volume
cogsq = cog ** 2
inertia = numpy.zeros((3, 3))
inertia[0, 0] = intg[5] + intg[6] - volume * (cogsq[1] + cogsq[2])
inertia[1, 1] = intg[4] + intg[6] - volume * (cogsq[2] + cogsq[0])
inertia[2, 2] = intg[4] + intg[5] - volume * (cogsq[0] + cogsq[1])
inertia[0, 1] = inertia[1, 0] = -(intg[7] - volume * cog[0] * cog[1])
inertia[1, 2] = inertia[2, 1] = -(intg[8] - volume * cog[1] * cog[2])
inertia[0, 2] = inertia[2, 0] = -(intg[9] - volume * cog[2] * cog[0])
return volume, cog, inertia
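    # Worked example (editor's note, not part of the upstream docs): for a
    # closed axis-aligned unit cube this returns a volume of 1.0, the cube's
    # centre as the COG and, assuming uniform unit density, an inertia matrix
    # of approximately numpy.diag([1 / 6., 1 / 6., 1 / 6.]).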
def update_units(self):
units = self.normals.copy()
non_zero_areas = self.areas > 0
areas = self.areas
if non_zero_areas.shape[0] != areas.shape[0]: # pragma: no cover
self.warning('Zero sized areas found, '
'units calculation will be partially incorrect')
if non_zero_areas.any():
non_zero_areas.shape = non_zero_areas.shape[0]
areas = numpy.hstack((2 * areas[non_zero_areas],) * DIMENSIONS)
units[non_zero_areas] /= areas
self.units = units
@classmethod
def rotation_matrix(cls, axis, theta):
'''
        Generate a rotation matrix to rotate the mesh over the given axis by
        the given theta (angle).
Uses the `Euler-Rodrigues
<https://en.wikipedia.org/wiki/Euler%E2%80%93Rodrigues_formula>`_
formula for fast rotations.
:param numpy.array axis: Axis to rotate over (x, y, z)
:param float theta: Rotation angle in radians, use `math.radians` to
convert degrees to radians if needed.
'''
axis = numpy.asarray(axis)
# No need to rotate if there is no actual rotation
if not axis.any():
return numpy.identity(3)
theta = 0.5 * numpy.asarray(theta)
axis = axis / numpy.linalg.norm(axis)
a = math.cos(theta)
b, c, d = - axis * math.sin(theta)
angles = a, b, c, d
powers = [x * y for x in angles for y in angles]
aa, ab, ac, ad = powers[0:4]
ba, bb, bc, bd = powers[4:8]
ca, cb, cc, cd = powers[8:12]
da, db, dc, dd = powers[12:16]
return numpy.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
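    # Worked example (editor's note, not from the upstream docs): for a
    # 90 degree rotation about the z axis, axis = [0, 0, 1] and theta = pi/2,
    # so a = cos(pi/4) and (b, c, d) = -(0, 0, 1) * sin(pi/4).  The matrix
    # above then reduces to
    #     [[0., -1., 0.],
    #      [1., 0., 0.],
    #      [0., 0., 1.]]
    # ``rotate_using_matrix`` applies this as ``vectors.dot(R)``, which is
    # why positive angles come out as clockwise rotations.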
def rotate(self, axis, theta=0, point=None):
'''
Rotate the matrix over the given axis by the given theta (angle)
Uses the :py:func:`rotation_matrix` in the background.
        .. note:: Note that the `point` was accidentally inverted with the
old version of the code. To get the old and incorrect behaviour
simply pass `-point` instead of `point` or `-numpy.array(point)` if
you're passing along an array.
:param numpy.array axis: Axis to rotate over (x, y, z)
:param float theta: Rotation angle in radians, use `math.radians` to
convert degrees to radians if needed.
:param numpy.array point: Rotation point so manual translation is not
required
'''
# No need to rotate if there is no actual rotation
if not theta:
return
self.rotate_using_matrix(self.rotation_matrix(axis, theta), point)
def rotate_using_matrix(self, rotation_matrix, point=None):
'''
Rotate using a given rotation matrix and optional rotation point
        Note that this rotation produces clockwise rotations for positive
        angles, which is arguably incorrect but is kept for legacy reasons.
For more details, read here:
https://github.com/WoLpH/numpy-stl/issues/166
'''
identity = numpy.identity(rotation_matrix.shape[0])
# No need to rotate if there is no actual rotation
if not rotation_matrix.any() or (identity == rotation_matrix).all():
return
if isinstance(point, (numpy.ndarray, list, tuple)) and len(point) == 3:
point = numpy.asarray(point)
elif point is None:
point = numpy.array([0, 0, 0])
elif isinstance(point, (int, float)):
point = numpy.asarray([point] * 3)
else:
raise TypeError('Incorrect type for point', point)
def _rotate(matrix):
if point.any():
# Translate while rotating
return (matrix - point).dot(rotation_matrix) + point
else:
# Simply apply the rotation
return matrix.dot(rotation_matrix)
# Rotate the normals
self.normals[:] = _rotate(self.normals[:])
# Rotate the vectors
for i in range(3):
self.vectors[:, i] = _rotate(self.vectors[:, i])
def translate(self, translation):
'''
Translate the mesh in the three directions
:param numpy.array translation: Translation vector (x, y, z)
'''
assert len(translation) == 3, "Translation vector must be of length 3"
self.x += translation[0]
self.y += translation[1]
self.z += translation[2]
def transform(self, matrix):
'''
Transform the mesh with a rotation and a translation stored in a
single 4x4 matrix
:param numpy.array matrix: Transform matrix with shape (4, 4), where
matrix[0:3, 0:3] represents the rotation
part of the transformation
matrix[0:3, 3] represents the translation
part of the transformation
'''
is_a_4x4_matrix = matrix.shape == (4, 4)
assert is_a_4x4_matrix, "Transformation matrix must be of shape (4, 4)"
rotation = matrix[0:3, 0:3]
unit_det_rotation = numpy.allclose(numpy.linalg.det(rotation), 1.0)
        assert unit_det_rotation, \
            "Rotation matrix does not have a unit determinant"
for i in range(3):
self.vectors[:, i] = numpy.dot(rotation, self.vectors[:, i].T).T
self.x += matrix[0, 3]
self.y += matrix[1, 3]
self.z += matrix[2, 3]
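    # Example (editor's note): a transform that translates by (10, 0, 0)
    # without rotating would be passed as
    #     numpy.array([[1., 0., 0., 10.],
    #                  [0., 1., 0., 0.],
    #                  [0., 0., 1., 0.],
    #                  [0., 0., 0., 1.]])
    # i.e. an identity rotation block plus a translation column.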
def _get_or_update(key):
def _get(self):
if not hasattr(self, '_%s' % key):
getattr(self, 'update_%s' % key)()
return getattr(self, '_%s' % key)
return _get
def _set(key):
def _set(self, value):
setattr(self, '_%s' % key, value)
return _set
min_ = property(_get_or_update('min'), _set('min'),
doc='Mesh minimum value')
max_ = property(_get_or_update('max'), _set('max'),
doc='Mesh maximum value')
areas = property(_get_or_update('areas'), _set('areas'),
doc='Mesh areas')
units = property(_get_or_update('units'), _set('units'),
doc='Mesh unit vectors')
def __getitem__(self, k):
return self.points[k]
def __setitem__(self, k, v):
self.points[k] = v
def __len__(self):
return self.points.shape[0]
def __iter__(self):
for point in self.points:
yield point
def get_mass_properties_with_density(self, density):
        # Like get_mass_properties, but weighted by density
        # (density is in kg/m3 when the mesh units are meters)
self.check()
def subexpression(x):
w0, w1, w2 = x[:, 0], x[:, 1], x[:, 2]
temp0 = w0 + w1
f1 = temp0 + w2
temp1 = w0 * w0
temp2 = temp1 + w1 * temp0
f2 = temp2 + w2 * f1
f3 = w0 * temp1 + w1 * temp2 + w2 * f2
g0 = f2 + w0 * (f1 + w0)
g1 = f2 + w1 * (f1 + w1)
g2 = f2 + w2 * (f1 + w2)
return f1, f2, f3, g0, g1, g2
x0, x1, x2 = self.x[:, 0], self.x[:, 1], self.x[:, 2]
y0, y1, y2 = self.y[:, 0], self.y[:, 1], self.y[:, 2]
z0, z1, z2 = self.z[:, 0], self.z[:, 1], self.z[:, 2]
a1, b1, c1 = x1 - x0, y1 - y0, z1 - z0
a2, b2, c2 = x2 - x0, y2 - y0, z2 - z0
d0, d1, d2 = b1 * c2 - b2 * c1, a2 * c1 - a1 * c2, a1 * b2 - a2 * b1
f1x, f2x, f3x, g0x, g1x, g2x = subexpression(self.x)
f1y, f2y, f3y, g0y, g1y, g2y = subexpression(self.y)
f1z, f2z, f3z, g0z, g1z, g2z = subexpression(self.z)
intg = numpy.zeros((10))
intg[0] = sum(d0 * f1x)
intg[1:4] = sum(d0 * f2x), sum(d1 * f2y), sum(d2 * f2z)
intg[4:7] = sum(d0 * f3x), sum(d1 * f3y), sum(d2 * f3z)
intg[7] = sum(d0 * (y0 * g0x + y1 * g1x + y2 * g2x))
intg[8] = sum(d1 * (z0 * g0y + z1 * g1y + z2 * g2y))
intg[9] = sum(d2 * (x0 * g0z + x1 * g1z + x2 * g2z))
intg /= numpy.array([6, 24, 24, 24, 60, 60, 60, 120, 120, 120])
volume = intg[0]
cog = intg[1:4] / volume
cogsq = cog ** 2
vmass = volume * density
inertia = numpy.zeros((3, 3))
inertia[0, 0] = (intg[5] + intg[6]) * density - vmass * (
cogsq[1] + cogsq[2])
inertia[1, 1] = (intg[4] + intg[6]) * density - vmass * (
cogsq[2] + cogsq[0])
inertia[2, 2] = (intg[4] + intg[5]) * density - vmass * (
cogsq[0] + cogsq[1])
inertia[0, 1] = inertia[1, 0] = -(
intg[7] * density - vmass * cog[0] * cog[1])
inertia[1, 2] = inertia[2, 1] = -(
intg[8] * density - vmass * cog[1] * cog[2])
inertia[0, 2] = inertia[2, 0] = -(
intg[9] * density - vmass * cog[2] * cog[0])
return volume, vmass, cog, inertia
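# Illustrative usage sketch (editor's addition, not part of the upstream
# numpy-stl sources): build a tiny two-triangle mesh, rotate it about the
# z axis and check its total surface area.  Relies only on the `numpy` and
# `math` imports already used elsewhere in this module.
def _example_basemesh_usage():  # pragma: no cover
    data = numpy.zeros(2, dtype=BaseMesh.dtype)
    # Two triangles forming a unit square in the z = 0 plane.
    data['vectors'][0] = numpy.array([[0., 0., 0.],
                                      [1., 0., 0.],
                                      [0., 1., 0.]])
    data['vectors'][1] = numpy.array([[1., 0., 0.],
                                      [1., 1., 0.],
                                      [0., 1., 0.]])
    mesh = BaseMesh(data, remove_empty_areas=False)
    mesh.rotate([0.0, 0.0, 1.0], math.radians(90))
    # Rotation preserves area: two triangles of 0.5 each.
    assert abs(mesh.areas.sum() - 1.0) < 1e-6
    return mesh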
| {
"repo_name": "WoLpH/numpy-stl",
"path": "stl/base.py",
"copies": "1",
"size": "22070",
"license": "bsd-3-clause",
"hash": 6766210410140129000,
"line_mean": 32.5920852359,
"line_max": 80,
"alpha_frac": 0.5338921613,
"autogenerated": false,
"ratio": 3.296982372273678,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43308745335736776,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
from weakref import ref
from matplotlib.externals import six
from datetime import datetime
import numpy as np
from numpy.testing.utils import (assert_array_equal, assert_approx_equal,
assert_array_almost_equal)
from nose.tools import (assert_equal, assert_not_equal, raises, assert_true,
assert_raises)
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
from matplotlib.cbook import delete_masked_points as dmp
def test_is_string_like():
y = np.arange(10)
assert_equal(cbook.is_string_like(y), False)
y.shape = 10, 1
assert_equal(cbook.is_string_like(y), False)
y.shape = 1, 10
assert_equal(cbook.is_string_like(y), False)
assert cbook.is_string_like("hello world")
assert_equal(cbook.is_string_like(10), False)
y = ['a', 'b', 'c']
assert_equal(cbook.is_string_like(y), False)
y = np.array(y)
assert_equal(cbook.is_string_like(y), False)
y = np.array(y, dtype=object)
assert cbook.is_string_like(y)
def test_is_sequence_of_strings():
y = ['a', 'b', 'c']
assert cbook.is_sequence_of_strings(y)
y = np.array(y, dtype=object)
assert cbook.is_sequence_of_strings(y)
def test_restrict_dict():
d = {'foo': 'bar', 1: 2}
d1 = cbook.restrict_dict(d, ['foo', 1])
assert_equal(d1, d)
d2 = cbook.restrict_dict(d, ['bar', 2])
assert_equal(d2, {})
d3 = cbook.restrict_dict(d, {'foo': 1})
assert_equal(d3, {'foo': 'bar'})
d4 = cbook.restrict_dict(d, {})
assert_equal(d4, {})
d5 = cbook.restrict_dict(d, set(['foo', 2]))
assert_equal(d5, {'foo': 'bar'})
# check that d was not modified
assert_equal(d, {'foo': 'bar', 1: 2})
class Test_delete_masked_points(object):
def setUp(self):
self.mask1 = [False, False, True, True, False, False]
self.arr0 = np.arange(1.0, 7.0)
self.arr1 = [1, 2, 3, np.nan, np.nan, 6]
self.arr2 = np.array(self.arr1)
self.arr3 = np.ma.array(self.arr2, mask=self.mask1)
self.arr_s = ['a', 'b', 'c', 'd', 'e', 'f']
self.arr_s2 = np.array(self.arr_s)
self.arr_dt = [datetime(2008, 1, 1), datetime(2008, 1, 2),
datetime(2008, 1, 3), datetime(2008, 1, 4),
datetime(2008, 1, 5), datetime(2008, 1, 6)]
self.arr_dt2 = np.array(self.arr_dt)
self.arr_colors = ['r', 'g', 'b', 'c', 'm', 'y']
self.arr_rgba = mcolors.colorConverter.to_rgba_array(self.arr_colors)
@raises(ValueError)
def test_bad_first_arg(self):
dmp('a string', self.arr0)
def test_string_seq(self):
actual = dmp(self.arr_s, self.arr1)
ind = [0, 1, 2, 5]
expected = (self.arr_s2.take(ind), self.arr2.take(ind))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_datetime(self):
actual = dmp(self.arr_dt, self.arr3)
ind = [0, 1, 5]
expected = (self.arr_dt2.take(ind),
self.arr3.take(ind).compressed())
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_rgba(self):
actual = dmp(self.arr3, self.arr_rgba)
ind = [0, 1, 5]
expected = (self.arr3.take(ind).compressed(),
self.arr_rgba.take(ind, axis=0))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_allequal():
assert(cbook.allequal([1, 1, 1]))
assert(not cbook.allequal([1, 1, 0]))
assert(cbook.allequal([]))
assert(cbook.allequal(('a', 'a')))
assert(not cbook.allequal(('a', 'b')))
class Test_boxplot_stats(object):
def setup(self):
np.random.seed(937)
self.nrows = 37
self.ncols = 4
self.data = np.random.lognormal(size=(self.nrows, self.ncols),
mean=1.5, sigma=1.75)
self.known_keys = sorted([
'mean', 'med', 'q1', 'q3', 'iqr',
'cilo', 'cihi', 'whislo', 'whishi',
'fliers', 'label'
])
self.std_results = cbook.boxplot_stats(self.data)
self.known_nonbootstrapped_res = {
'cihi': 6.8161283264444847,
'cilo': -0.1489815330368689,
'iqr': 13.492709959447094,
'mean': 13.00447442387868,
'med': 3.3335733967038079,
'fliers': np.array([
92.55467075, 87.03819018, 42.23204914, 39.29390996
]),
'q1': 1.3597529879465153,
'q3': 14.85246294739361,
'whishi': 27.899688243699629,
'whislo': 0.042143774965502923
}
self.known_bootstrapped_ci = {
'cihi': 8.939577523357828,
'cilo': 1.8692703958676578,
}
self.known_whis3_res = {
'whishi': 42.232049135969874,
'whislo': 0.042143774965502923,
'fliers': np.array([92.55467075, 87.03819018]),
}
self.known_res_percentiles = {
'whislo': 0.1933685896907924,
'whishi': 42.232049135969874
}
self.known_res_range = {
'whislo': 0.042143774965502923,
'whishi': 92.554670752188699
}
def test_form_main_list(self):
assert_true(isinstance(self.std_results, list))
def test_form_each_dict(self):
for res in self.std_results:
assert_true(isinstance(res, dict))
def test_form_dict_keys(self):
for res in self.std_results:
keys = sorted(list(res.keys()))
for key in keys:
assert_true(key in self.known_keys)
def test_results_baseline(self):
res = self.std_results[0]
for key in list(self.known_nonbootstrapped_res.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_nonbootstrapped_res[key]
)
def test_results_bootstrapped(self):
results = cbook.boxplot_stats(self.data, bootstrap=10000)
res = results[0]
for key in list(self.known_bootstrapped_ci.keys()):
assert_approx_equal(
res[key],
self.known_bootstrapped_ci[key]
)
def test_results_whiskers_float(self):
results = cbook.boxplot_stats(self.data, whis=3)
res = results[0]
for key in list(self.known_whis3_res.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_whis3_res[key]
)
def test_results_whiskers_range(self):
results = cbook.boxplot_stats(self.data, whis='range')
res = results[0]
for key in list(self.known_res_range.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_res_range[key]
)
def test_results_whiskers_percentiles(self):
results = cbook.boxplot_stats(self.data, whis=[5, 95])
res = results[0]
for key in list(self.known_res_percentiles.keys()):
if key != 'fliers':
assert_statement = assert_approx_equal
else:
assert_statement = assert_array_almost_equal
assert_statement(
res[key],
self.known_res_percentiles[key]
)
def test_results_withlabels(self):
labels = ['Test1', 2, 'ardvark', 4]
results = cbook.boxplot_stats(self.data, labels=labels)
res = results[0]
for lab, res in zip(labels, results):
assert_equal(res['label'], lab)
results = cbook.boxplot_stats(self.data)
for res in results:
assert('label' not in res)
@raises(ValueError)
def test_label_error(self):
labels = [1, 2]
results = cbook.boxplot_stats(self.data, labels=labels)
@raises(ValueError)
def test_bad_dims(self):
data = np.random.normal(size=(34, 34, 34))
results = cbook.boxplot_stats(data)
class Test_callback_registry(object):
def setup(self):
self.signal = 'test'
self.callbacks = cbook.CallbackRegistry()
def connect(self, s, func):
return self.callbacks.connect(s, func)
def is_empty(self):
assert_equal(self.callbacks._func_cid_map, {})
assert_equal(self.callbacks.callbacks, {})
def is_not_empty(self):
assert_not_equal(self.callbacks._func_cid_map, {})
assert_not_equal(self.callbacks.callbacks, {})
def test_callback_complete(self):
# ensure we start with an empty registry
self.is_empty()
        # create another instance to provide a bound-method callback
mini_me = Test_callback_registry()
# test that we can add a callback
cid1 = self.connect(self.signal, mini_me.dummy)
assert_equal(type(cid1), int)
self.is_not_empty()
# test that we don't add a second callback
cid2 = self.connect(self.signal, mini_me.dummy)
assert_equal(cid1, cid2)
self.is_not_empty()
assert_equal(len(self.callbacks._func_cid_map), 1)
assert_equal(len(self.callbacks.callbacks), 1)
del mini_me
# check we now have no callbacks registered
self.is_empty()
def dummy(self):
pass
def test_to_prestep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_prestep(x, y1, y2)
x_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y1_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_prestep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_poststep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_poststep(x, y1, y2)
x_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_poststep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_midstep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_midstep(x, y1, y2)
x_target = np.asarray([0, .5, .5, 1.5, 1.5, 2.5, 2.5, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_midstep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_step_fails():
assert_raises(ValueError, cbook._step_validation,
np.arange(12).reshape(3, 4), 'a')
assert_raises(ValueError, cbook._step_validation,
np.arange(12), 'a')
assert_raises(ValueError, cbook._step_validation,
np.arange(12))
assert_raises(ValueError, cbook._step_validation,
np.arange(12), np.arange(3))
def test_grouper():
class dummy():
pass
a, b, c, d, e = objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
assert set(list(g)[0]) == set(objs)
assert set(g.get_siblings(a)) == set(objs)
for other in objs[1:]:
assert g.joined(a, other)
g.remove(a)
for other in objs[1:]:
assert not g.joined(a, other)
for A, B in itertools.product(objs[1:], objs[1:]):
assert g.joined(A, B)
def test_grouper_private():
class dummy():
pass
objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
# reach in and touch the internals !
mapping = g._mapping
for o in objs:
assert ref(o) in mapping
base_set = mapping[ref(objs[0])]
for o in objs[1:]:
assert mapping[ref(o)] is base_set
| {
"repo_name": "zrhans/pythonanywhere",
"path": ".virtualenvs/django19/lib/python3.4/site-packages/matplotlib/tests/test_cbook.py",
"copies": "2",
"size": "12932",
"license": "apache-2.0",
"hash": 7866534601494931000,
"line_mean": 30.0119904077,
"line_max": 77,
"alpha_frac": 0.5653417878,
"autogenerated": false,
"ratio": 3.131234866828087,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46965766546280874,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import pickle
from weakref import ref
import warnings
import six
from datetime import datetime
import numpy as np
from numpy.testing.utils import (assert_array_equal, assert_approx_equal,
assert_array_almost_equal)
import pytest
import matplotlib.cbook as cbook
import matplotlib.colors as mcolors
from matplotlib.cbook import delete_masked_points as dmp
def test_is_hashable():
s = 'string'
assert cbook.is_hashable(s)
lst = ['list', 'of', 'stings']
assert not cbook.is_hashable(lst)
def test_restrict_dict():
d = {'foo': 'bar', 1: 2}
d1 = cbook.restrict_dict(d, ['foo', 1])
assert d1 == d
d2 = cbook.restrict_dict(d, ['bar', 2])
assert d2 == {}
d3 = cbook.restrict_dict(d, {'foo': 1})
assert d3 == {'foo': 'bar'}
d4 = cbook.restrict_dict(d, {})
assert d4 == {}
d5 = cbook.restrict_dict(d, {'foo', 2})
assert d5 == {'foo': 'bar'}
# check that d was not modified
assert d == {'foo': 'bar', 1: 2}
class Test_delete_masked_points(object):
def setup_method(self):
self.mask1 = [False, False, True, True, False, False]
self.arr0 = np.arange(1.0, 7.0)
self.arr1 = [1, 2, 3, np.nan, np.nan, 6]
self.arr2 = np.array(self.arr1)
self.arr3 = np.ma.array(self.arr2, mask=self.mask1)
self.arr_s = ['a', 'b', 'c', 'd', 'e', 'f']
self.arr_s2 = np.array(self.arr_s)
self.arr_dt = [datetime(2008, 1, 1), datetime(2008, 1, 2),
datetime(2008, 1, 3), datetime(2008, 1, 4),
datetime(2008, 1, 5), datetime(2008, 1, 6)]
self.arr_dt2 = np.array(self.arr_dt)
self.arr_colors = ['r', 'g', 'b', 'c', 'm', 'y']
self.arr_rgba = mcolors.to_rgba_array(self.arr_colors)
def test_bad_first_arg(self):
with pytest.raises(ValueError):
dmp('a string', self.arr0)
def test_string_seq(self):
actual = dmp(self.arr_s, self.arr1)
ind = [0, 1, 2, 5]
expected = (self.arr_s2.take(ind), self.arr2.take(ind))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_datetime(self):
actual = dmp(self.arr_dt, self.arr3)
ind = [0, 1, 5]
expected = (self.arr_dt2.take(ind),
self.arr3.take(ind).compressed())
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
def test_rgba(self):
actual = dmp(self.arr3, self.arr_rgba)
ind = [0, 1, 5]
expected = (self.arr3.take(ind).compressed(),
self.arr_rgba.take(ind, axis=0))
assert_array_equal(actual[0], expected[0])
assert_array_equal(actual[1], expected[1])
class Test_boxplot_stats(object):
def setup(self):
np.random.seed(937)
self.nrows = 37
self.ncols = 4
self.data = np.random.lognormal(size=(self.nrows, self.ncols),
mean=1.5, sigma=1.75)
self.known_keys = sorted([
'mean', 'med', 'q1', 'q3', 'iqr',
'cilo', 'cihi', 'whislo', 'whishi',
'fliers', 'label'
])
self.std_results = cbook.boxplot_stats(self.data)
self.known_nonbootstrapped_res = {
'cihi': 6.8161283264444847,
'cilo': -0.1489815330368689,
'iqr': 13.492709959447094,
'mean': 13.00447442387868,
'med': 3.3335733967038079,
'fliers': np.array([
92.55467075, 87.03819018, 42.23204914, 39.29390996
]),
'q1': 1.3597529879465153,
'q3': 14.85246294739361,
'whishi': 27.899688243699629,
'whislo': 0.042143774965502923
}
self.known_bootstrapped_ci = {
'cihi': 8.939577523357828,
'cilo': 1.8692703958676578,
}
self.known_whis3_res = {
'whishi': 42.232049135969874,
'whislo': 0.042143774965502923,
'fliers': np.array([92.55467075, 87.03819018]),
}
self.known_res_percentiles = {
'whislo': 0.1933685896907924,
'whishi': 42.232049135969874
}
self.known_res_range = {
'whislo': 0.042143774965502923,
'whishi': 92.554670752188699
}
def test_form_main_list(self):
assert isinstance(self.std_results, list)
def test_form_each_dict(self):
for res in self.std_results:
assert isinstance(res, dict)
def test_form_dict_keys(self):
for res in self.std_results:
assert set(res) <= set(self.known_keys)
def test_results_baseline(self):
res = self.std_results[0]
for key, value in self.known_nonbootstrapped_res.items():
assert_array_almost_equal(res[key], value)
def test_results_bootstrapped(self):
results = cbook.boxplot_stats(self.data, bootstrap=10000)
res = results[0]
for key, value in self.known_bootstrapped_ci.items():
assert_approx_equal(res[key], value)
def test_results_whiskers_float(self):
results = cbook.boxplot_stats(self.data, whis=3)
res = results[0]
for key, value in self.known_whis3_res.items():
assert_array_almost_equal(res[key], value)
def test_results_whiskers_range(self):
results = cbook.boxplot_stats(self.data, whis='range')
res = results[0]
for key, value in self.known_res_range.items():
assert_array_almost_equal(res[key], value)
def test_results_whiskers_percentiles(self):
results = cbook.boxplot_stats(self.data, whis=[5, 95])
res = results[0]
for key, value in self.known_res_percentiles.items():
assert_array_almost_equal(res[key], value)
def test_results_withlabels(self):
labels = ['Test1', 2, 'ardvark', 4]
results = cbook.boxplot_stats(self.data, labels=labels)
res = results[0]
for lab, res in zip(labels, results):
assert res['label'] == lab
results = cbook.boxplot_stats(self.data)
for res in results:
assert 'label' not in res
def test_label_error(self):
labels = [1, 2]
with pytest.raises(ValueError):
results = cbook.boxplot_stats(self.data, labels=labels)
def test_bad_dims(self):
data = np.random.normal(size=(34, 34, 34))
with pytest.raises(ValueError):
results = cbook.boxplot_stats(data)
def test_boxplot_stats_autorange_false(self):
x = np.zeros(shape=140)
x = np.hstack([-25, x, 25])
bstats_false = cbook.boxplot_stats(x, autorange=False)
bstats_true = cbook.boxplot_stats(x, autorange=True)
assert bstats_false[0]['whislo'] == 0
assert bstats_false[0]['whishi'] == 0
assert_array_almost_equal(bstats_false[0]['fliers'], [-25, 25])
assert bstats_true[0]['whislo'] == -25
assert bstats_true[0]['whishi'] == 25
assert_array_almost_equal(bstats_true[0]['fliers'], [])
class Test_callback_registry(object):
def setup(self):
self.signal = 'test'
self.callbacks = cbook.CallbackRegistry()
def connect(self, s, func):
return self.callbacks.connect(s, func)
def is_empty(self):
assert self.callbacks._func_cid_map == {}
assert self.callbacks.callbacks == {}
def is_not_empty(self):
assert self.callbacks._func_cid_map != {}
assert self.callbacks.callbacks != {}
def test_callback_complete(self):
# ensure we start with an empty registry
self.is_empty()
        # create another instance to provide a bound-method callback
mini_me = Test_callback_registry()
# test that we can add a callback
cid1 = self.connect(self.signal, mini_me.dummy)
assert type(cid1) == int
self.is_not_empty()
# test that we don't add a second callback
cid2 = self.connect(self.signal, mini_me.dummy)
assert cid1 == cid2
self.is_not_empty()
assert len(self.callbacks._func_cid_map) == 1
assert len(self.callbacks.callbacks) == 1
del mini_me
# check we now have no callbacks registered
self.is_empty()
def dummy(self):
pass
def test_pickling(self):
assert hasattr(pickle.loads(pickle.dumps(cbook.CallbackRegistry())),
"callbacks")
def raising_cb_reg(func):
class TestException(Exception):
pass
def raising_function():
raise RuntimeError
def transformer(excp):
if isinstance(excp, RuntimeError):
raise TestException
raise excp
# default behavior
cb = cbook.CallbackRegistry()
cb.connect('foo', raising_function)
# old default
cb_old = cbook.CallbackRegistry(exception_handler=None)
cb_old.connect('foo', raising_function)
# filter
cb_filt = cbook.CallbackRegistry(exception_handler=transformer)
cb_filt.connect('foo', raising_function)
return pytest.mark.parametrize('cb, excp',
[[cb, None],
[cb_old, RuntimeError],
[cb_filt, TestException]])(func)
@raising_cb_reg
def test_callbackregistry_process_exception(cb, excp):
if excp is not None:
with pytest.raises(excp):
cb.process('foo')
else:
cb.process('foo')
def test_sanitize_sequence():
d = {'a': 1, 'b': 2, 'c': 3}
k = ['a', 'b', 'c']
v = [1, 2, 3]
i = [('a', 1), ('b', 2), ('c', 3)]
assert k == sorted(cbook.sanitize_sequence(d.keys()))
assert v == sorted(cbook.sanitize_sequence(d.values()))
assert i == sorted(cbook.sanitize_sequence(d.items()))
assert i == cbook.sanitize_sequence(i)
assert k == cbook.sanitize_sequence(k)
fail_mapping = (
({'a': 1}, {'forbidden': ('a')}),
({'a': 1}, {'required': ('b')}),
({'a': 1, 'b': 2}, {'required': ('a'), 'allowed': ()})
)
warn_passing_mapping = (
({'a': 1, 'b': 2}, {'a': 1}, {'alias_mapping': {'a': ['b']}}, 1),
({'a': 1, 'b': 2}, {'a': 1},
{'alias_mapping': {'a': ['b']}, 'allowed': ('a',)}, 1),
({'a': 1, 'b': 2}, {'a': 2}, {'alias_mapping': {'a': ['a', 'b']}}, 1),
({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'c': 3},
{'alias_mapping': {'a': ['b']}, 'required': ('a', )}, 1),
)
pass_mapping = (
({'a': 1, 'b': 2}, {'a': 1, 'b': 2}, {}),
({'b': 2}, {'a': 2}, {'alias_mapping': {'a': ['a', 'b']}}),
({'b': 2}, {'a': 2},
{'alias_mapping': {'a': ['b']}, 'forbidden': ('b', )}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', ), 'allowed': ('c', )}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', 'c'), 'allowed': ('c', )}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', 'c'), 'allowed': ('a', 'c')}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3},
{'required': ('a', 'c'), 'allowed': ()}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3}, {'required': ('a', 'c')}),
({'a': 1, 'c': 3}, {'a': 1, 'c': 3}, {'allowed': ('a', 'c')}),
)
@pytest.mark.parametrize('inp, kwargs_to_norm', fail_mapping)
def test_normalize_kwargs_fail(inp, kwargs_to_norm):
with pytest.raises(TypeError):
cbook.normalize_kwargs(inp, **kwargs_to_norm)
@pytest.mark.parametrize('inp, expected, kwargs_to_norm, warn_count',
warn_passing_mapping)
def test_normalize_kwargs_warn(inp, expected, kwargs_to_norm, warn_count):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert expected == cbook.normalize_kwargs(inp, **kwargs_to_norm)
assert len(w) == warn_count
@pytest.mark.parametrize('inp, expected, kwargs_to_norm',
pass_mapping)
def test_normalize_kwargs_pass(inp, expected, kwargs_to_norm):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
assert expected == cbook.normalize_kwargs(inp, **kwargs_to_norm)
assert len(w) == 0
def test_to_prestep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_prestep(x, y1, y2)
x_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y1_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_prestep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_poststep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_poststep(x, y1, y2)
x_target = np.asarray([0, 1, 1, 2, 2, 3, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_poststep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
def test_to_midstep():
x = np.arange(4)
y1 = np.arange(4)
y2 = np.arange(4)[::-1]
xs, y1s, y2s = cbook.pts_to_midstep(x, y1, y2)
x_target = np.asarray([0, .5, .5, 1.5, 1.5, 2.5, 2.5, 3], dtype='float')
y1_target = np.asarray([0, 0, 1, 1, 2, 2, 3, 3], dtype='float')
y2_target = np.asarray([3, 3, 2, 2, 1, 1, 0, 0], dtype='float')
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
assert_array_equal(y2_target, y2s)
xs, y1s = cbook.pts_to_midstep(x, y1)
assert_array_equal(x_target, xs)
assert_array_equal(y1_target, y1s)
@pytest.mark.parametrize(
"args",
[(np.arange(12).reshape(3, 4), 'a'),
(np.arange(12), 'a'),
(np.arange(12), np.arange(3))])
def test_step_fails(args):
with pytest.raises(ValueError):
cbook.pts_to_prestep(*args)
def test_grouper():
class dummy():
pass
a, b, c, d, e = objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
assert set(list(g)[0]) == set(objs)
assert set(g.get_siblings(a)) == set(objs)
for other in objs[1:]:
assert g.joined(a, other)
g.remove(a)
for other in objs[1:]:
assert not g.joined(a, other)
for A, B in itertools.product(objs[1:], objs[1:]):
assert g.joined(A, B)
def test_grouper_private():
class dummy():
pass
objs = [dummy() for j in range(5)]
g = cbook.Grouper()
g.join(*objs)
# reach in and touch the internals !
mapping = g._mapping
for o in objs:
assert ref(o) in mapping
base_set = mapping[ref(objs[0])]
for o in objs[1:]:
assert mapping[ref(o)] is base_set
def test_flatiter():
x = np.arange(5)
it = x.flat
assert 0 == next(it)
assert 1 == next(it)
ret = cbook.safe_first_element(it)
assert ret == 0
assert 0 == next(it)
assert 1 == next(it)
class TestFuncParser(object):
x_test = np.linspace(0.01, 0.5, 3)
validstrings = ['linear', 'quadratic', 'cubic', 'sqrt', 'cbrt',
'log', 'log10', 'log2', 'x**{1.5}', 'root{2.5}(x)',
'log{2}(x)',
'log(x+{0.5})', 'log10(x+{0.1})', 'log{2}(x+{0.1})',
'log{2}(x+{0})']
results = [(lambda x: x),
np.square,
(lambda x: x**3),
np.sqrt,
(lambda x: x**(1. / 3)),
np.log,
np.log10,
np.log2,
(lambda x: x**1.5),
(lambda x: x**(1 / 2.5)),
(lambda x: np.log2(x)),
(lambda x: np.log(x + 0.5)),
(lambda x: np.log10(x + 0.1)),
(lambda x: np.log2(x + 0.1)),
(lambda x: np.log2(x))]
bounded_list = [True, True, True, True, True,
False, False, False, True, True,
False,
True, True, True,
False]
@pytest.mark.parametrize("string, func",
zip(validstrings, results),
ids=validstrings)
def test_values(self, string, func):
func_parser = cbook._StringFuncParser(string)
f = func_parser.function
assert_array_almost_equal(f(self.x_test), func(self.x_test))
@pytest.mark.parametrize("string", validstrings, ids=validstrings)
def test_inverse(self, string):
func_parser = cbook._StringFuncParser(string)
f = func_parser.func_info
fdir = f.function
finv = f.inverse
assert_array_almost_equal(finv(fdir(self.x_test)), self.x_test)
@pytest.mark.parametrize("string", validstrings, ids=validstrings)
def test_get_inverse(self, string):
func_parser = cbook._StringFuncParser(string)
finv1 = func_parser.inverse
finv2 = func_parser.func_info.inverse
assert_array_almost_equal(finv1(self.x_test), finv2(self.x_test))
@pytest.mark.parametrize("string, bounded",
zip(validstrings, bounded_list),
ids=validstrings)
def test_bounded(self, string, bounded):
func_parser = cbook._StringFuncParser(string)
b = func_parser.is_bounded_0_1
assert_array_equal(b, bounded)
| {
"repo_name": "louisLouL/pair_trading",
"path": "capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_cbook.py",
"copies": "2",
"size": "17730",
"license": "mit",
"hash": -9085241931884394000,
"line_mean": 31.2363636364,
"line_max": 76,
"alpha_frac": 0.5490693739,
"autogenerated": false,
"ratio": 3.1214788732394365,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4670548247139437,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
from datetime import datetime, timedelta
from formencode import Schema, NestedVariables, validators
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPFound, HTTPBadRequest
from pyramid.security import forget, remember
from pyramid_uniform import Form, FormRenderer
from .. import mail, model
log = logging.getLogger(__name__)
class LoginForm(Schema):
"""
Schema for validating login attempts.
"""
allow_extra_fields = False
email = validators.UnicodeString(not_empty=False, strip=True)
password = validators.UnicodeString(not_empty=False, strip=True)
remember_me = validators.Bool()
class SettingsForm(Schema):
allow_extra_fields = False
pre_validators = [NestedVariables()]
name = validators.UnicodeString(not_empty=True, strip=True)
email = validators.UnicodeString(not_empty=True, strip=True)
password = validators.UnicodeString(not_empty=False, min=4, strip=True)
password2 = validators.UnicodeString(not_empty=False, strip=True)
chained_validators = [validators.FieldsMatch('password', 'password2')]
class ForgotPasswordForm(Schema):
allow_extra_fields = False
email = validators.UnicodeString(not_empty=True, strip=True)
class ForgotResetForm(Schema):
allow_extra_fields = False
password = validators.UnicodeString(not_empty=False, min=4, strip=True)
password2 = validators.UnicodeString(not_empty=False, strip=True)
chained_validators = [validators.FieldsMatch('password', 'password2')]
def constant_time_compare(a, b):
"Compare two strings with constant time. Used to prevent timing attacks."
if len(a) != len(b):
return False
result = 0
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
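# Editor's note (not part of the upstream module): on interpreters that
# provide it, ``hmac.compare_digest`` gives the same constant-time guarantee
# and could replace the loop above, e.g.
#
#     import hmac
#     hmac.compare_digest(a, b)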
class UserView(object):
def __init__(self, request):
self.request = request
def _do_login(self, email, password, remember_me):
request = self.request
user = model.Session.query(model.User).\
filter_by(email=email).\
first()
if user and user.check_password(password):
# Set auth token.
remember(request, user.id, user=user, remember=remember_me)
request.flash('Login successful.', 'success')
raise HTTPFound(location=request.route_url('account'))
else:
request.flash('Email or password incorrect.', 'danger')
@view_config(route_name='account', renderer='account.html',
permission='authenticated')
def account(self):
return {}
@view_config(route_name='settings', renderer='settings.html',
permission='authenticated')
def settings(self):
request = self.request
form = Form(request, schema=SettingsForm)
if form.validate():
password = form.data.pop('password')
del form.data['password2']
form.bind(request.user)
request.flash('Saved settings.', 'success')
if password:
request.user.update_password(password)
request.flash('Updated password.', 'success')
return HTTPFound(location=request.route_url('account'))
return dict(renderer=FormRenderer(form))
@view_config(route_name='login', renderer='login.html')
def login(self):
"""
In a GET, just show the login form.
In a POST, accept params and try to authenticate the user.
"""
request = self.request
form = Form(request, schema=LoginForm, skip_csrf=True)
if form.validate():
email = form.data['email']
password = form.data['password']
remember_me = form.data['remember_me']
self._do_login(email, password, remember_me)
return dict(renderer=FormRenderer(form))
@view_config(route_name='logout')
def logout(self):
"""
Log the user out.
"""
request = self.request
if request.user:
request.flash('You have been logged out.', 'info')
forget(request)
raise HTTPFound(location=request.route_url('login'))
return {}
def _get_user(self, email):
return model.Session.query(model.User).\
filter_by(email=email).\
first()
def _validate_reset_token(self):
"""
Check forgotten password reset token and grab account.
        This will raise a ``400 Bad Request`` unless all of the following
        conditions are met:
- an ``email`` param must be present
- a ``token`` param must be present
- an active account must be associated with the ``email`` param
- the ``token`` param must match the account's password reset
token
"""
request = self.request
params = request.GET
params_present = 'email' in params and 'token' in params
user = None
tokens_match = False
if params_present:
email = params['email']
token = params['token']
user = self._get_user(email)
if user:
expected_token = user.password_reset_token
tokens_match = constant_time_compare(expected_token, token)
if not (params_present and user and tokens_match):
log.warn('invalid_reset_token email:%s token:%s',
params.get('email'), params.get('token'))
raise HTTPBadRequest
now = datetime.utcnow()
expiration_time = user.password_reset_time + timedelta(days=1)
if now > expiration_time:
request.flash('Password reset email has expired.', 'danger')
raise HTTPFound(location=request.route_url('forgot-password'))
return user
@view_config(route_name='forgot-password', renderer='forgot_password.html')
def forgot_password(self):
request = self.request
form = Form(request, schema=ForgotPasswordForm)
if form.validate():
user = self._get_user(form.data['email'])
if not user:
request.flash("No user with that email address "
"exists. Please double check it.", 'danger')
raise HTTPFound(location=request.current_route_url())
token = user.set_reset_password_token()
link = request.route_url('forgot-reset', _query=dict(
email=user.email,
token=token,
))
vars = dict(user=user, link=link)
mail.send(request, 'forgot_password', vars, to=[user.email])
request.flash("An email has been sent with "
"instructions to reset your password.", 'danger')
return HTTPFound(location=request.route_url('login'))
return dict(renderer=FormRenderer(form))
@view_config(route_name='forgot-reset', renderer='forgot_reset.html')
def forgot_reset(self):
request = self.request
user = self._validate_reset_token()
form = Form(request, schema=ForgotResetForm)
if form.validate():
user.update_password(form.data['password'])
request.flash("Password has been updated.", 'success')
return HTTPFound(location=request.route_url('login'))
return dict(renderer=FormRenderer(form))
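# Illustrative wiring sketch (editor's addition, not from the upstream
# project): the views above expect routes with these names to be registered
# on the Pyramid configurator.  The URL patterns shown here are assumptions.
def _example_includeme(config):  # pragma: no cover
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('account', '/account')
    config.add_route('settings', '/settings')
    config.add_route('forgot-password', '/forgot-password')
    config.add_route('forgot-reset', '/forgot-reset')
    config.scan(__name__)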
| {
"repo_name": "storborg/warpworks",
"path": "warpworks/views/auth.py",
"copies": "1",
"size": "7512",
"license": "mit",
"hash": -6939931899803676000,
"line_mean": 32.9909502262,
"line_max": 79,
"alpha_frac": 0.613684771,
"autogenerated": false,
"ratio": 4.258503401360544,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 221
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
from itertools import chain
from pprint import pformat
from functools import wraps
import six
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
import transaction as zope_transaction
from zope.interface import implementer
from transaction.interfaces import ISavepointDataManager
from .query import ElasticQuery
from .result import ElasticResultRecord
log = logging.getLogger(__name__)
ANALYZER_SETTINGS = {
"analysis": {
"filter": {
"snowball": {
"type": "snowball",
"language": "English"
},
},
"analyzer": {
"lowercase": {
"type": "custom",
"tokenizer": "standard",
"filter": ["standard", "lowercase"]
},
"email": {
"type": "custom",
"tokenizer": "uax_url_email",
"filter": ["standard", "lowercase"]
},
"content": {
"type": "custom",
"tokenizer": "standard",
"char_filter": ["html_strip"],
"filter": ["standard", "lowercase", "stop", "snowball"]
}
}
}
}
CREATE_INDEX_SETTINGS = ANALYZER_SETTINGS.copy()
CREATE_INDEX_SETTINGS.update({
"index": {
"number_of_shards": 2,
"number_of_replicas": 0
},
})
STATUS_ACTIVE = 'active'
STATUS_CHANGED = 'changed'
_CLIENT_STATE = {}
@implementer(ISavepointDataManager)
class ElasticDataManager(object):
def __init__(self, client, transaction_manager):
self.client = client
self.transaction_manager = transaction_manager
t = transaction_manager.get()
t.join(self)
_CLIENT_STATE[id(client)] = STATUS_ACTIVE
self._reset()
def _reset(self):
log.error('_reset(%s)', self)
self.client.uncommitted = []
def _finish(self):
log.error('_finish(%s)', self)
client = self.client
del _CLIENT_STATE[id(client)]
def abort(self, transaction):
log.error('abort(%s)', self)
self._reset()
self._finish()
def tpc_begin(self, transaction):
log.error('tpc_begin(%s)', self)
pass
def commit(self, transaction):
log.error('commit(%s)', self)
pass
def tpc_vote(self, transaction):
log.error('tpc_vote(%s)', self)
# XXX Ideally, we'd try to check the uncommitted queue and make sure
        # everything looked ok. Not sure how we can do that, though.
pass
def tpc_finish(self, transaction):
# Actually persist the uncommitted queue.
log.error('tpc_finish(%s)', self)
log.warn("running: %r", self.client.uncommitted)
for cmd, args, kwargs in self.client.uncommitted:
kwargs['immediate'] = True
getattr(self.client, cmd)(*args, **kwargs)
self._reset()
self._finish()
def tpc_abort(self, transaction):
log.error('tpc_abort()')
self._reset()
self._finish()
def sortKey(self):
# NOTE: Ideally, we want this to sort *after* database-oriented data
# managers, like the SQLAlchemy one. The double tilde should get us
# to the end.
return '~~elasticsearch' + str(id(self))
def savepoint(self):
return ElasticSavepoint(self)
class ElasticSavepoint(object):
def __init__(self, dm):
self.dm = dm
self.saved = dm.client.uncommitted.copy()
def rollback(self):
self.dm.client.uncommitted = self.saved.copy()
def join_transaction(client, transaction_manager):
client_id = id(client)
existing_state = _CLIENT_STATE.get(client_id, None)
if existing_state is None:
log.error('client %s not found, setting up new data manager',
client_id)
ElasticDataManager(client, transaction_manager)
else:
log.error('client %s found, using existing data manager',
client_id)
_CLIENT_STATE[client_id] = STATUS_CHANGED
def transactional(f):
@wraps(f)
def transactional_inner(client, *args, **kwargs):
immediate = kwargs.pop('immediate', None)
if client.use_transaction:
if immediate:
return f(client, *args, **kwargs)
else:
log.error('enqueueing action: %s: %r, %r', f.__name__, args,
kwargs)
join_transaction(client, client.transaction_manager)
client.uncommitted.append((f.__name__, args, kwargs))
return
return f(client, *args, **kwargs)
return transactional_inner
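# Editor's note (illustrative, not from the upstream sources): when a client
# is created with ``use_transaction=True``, the decorated write methods are
# queued and only sent to Elasticsearch once the surrounding zope transaction
# commits, e.g.
#
#     import transaction
#     with transaction.manager:
#         client.index_document(id=1, doc_type='Article',
#                               doc={'title': 'Hello'})
#     # nothing is written until the block above commits successfully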
class ElasticClient(object):
"""
A handle for interacting with the Elasticsearch backend.
"""
def __init__(self, servers, index, timeout=1.0, disable_indexing=False,
use_transaction=True,
transaction_manager=zope_transaction.manager):
self.index = index
self.disable_indexing = disable_indexing
self.use_transaction = use_transaction
self.transaction_manager = transaction_manager
self.es = Elasticsearch(servers)
def ensure_index(self, recreate=False):
"""
Ensure that the index exists on the ES server, and has up-to-date
settings.
"""
exists = self.es.indices.exists(self.index)
if recreate or not exists:
if exists:
self.es.indices.delete(self.index)
self.es.indices.create(self.index,
body=dict(settings=CREATE_INDEX_SETTINGS))
def delete_index(self):
"""
Delete the index on the ES server.
"""
self.es.indices.delete(self.index)
def ensure_mapping(self, cls, recreate=False):
"""
Put an explicit mapping for the given class if it doesn't already
exist.
"""
doc_type = cls.__name__
doc_mapping = cls.elastic_mapping()
doc_mapping = dict(doc_mapping)
if cls.elastic_parent:
doc_mapping["_parent"] = {
"type": cls.elastic_parent
}
doc_mapping = {doc_type: doc_mapping}
log.debug('Putting mapping: \n%s', pformat(doc_mapping))
if recreate:
try:
self.es.indices.delete_mapping(index=self.index,
doc_type=doc_type)
except NotFoundError:
pass
self.es.indices.put_mapping(index=self.index,
doc_type=doc_type,
body=doc_mapping)
def delete_mapping(self, cls):
"""
Delete the mapping corresponding to ``cls`` on the server. Does not
delete subclass mappings.
"""
doc_type = cls.__name__
self.es.indices.delete_mapping(index=self.index,
doc_type=doc_type)
def ensure_all_mappings(self, base_class, recreate=False):
"""
Initialize explicit mappings for all subclasses of the specified
        SQLAlchemy declarative base class.
"""
for cls in base_class._decl_class_registry.values():
if hasattr(cls, 'elastic_mapping'):
self.ensure_mapping(cls, recreate=recreate)
def get_mappings(self, cls=None):
"""
Return the object mappings currently used by ES.
"""
doc_type = cls and cls.__name__
raw = self.es.indices.get_mapping(index=self.index,
doc_type=doc_type)
return raw[self.index]['mappings']
def index_object(self, obj, **kw):
"""
Add or update the indexed document for an object.
"""
doc = obj.elastic_document()
doc_type = obj.__class__.__name__
doc_id = doc.pop("_id")
doc_parent = obj.elastic_parent
log.debug('Indexing object:\n%s', pformat(doc))
log.debug('Type is %r', doc_type)
log.debug('ID is %r', doc_id)
log.debug('Parent is %r', doc_parent)
self.index_document(id=doc_id,
doc_type=doc_type,
doc=doc,
parent=doc_parent,
**kw)
def delete_object(self, obj, safe=False, **kw):
"""
Delete the indexed document for an object.
"""
doc = obj.elastic_document()
doc_type = obj.__class__.__name__
doc_id = doc.pop("_id")
doc_parent = obj.elastic_parent
self.delete_document(id=doc_id,
doc_type=doc_type,
parent=doc_parent,
safe=safe,
**kw)
@transactional
def index_document(self, id, doc_type, doc, parent=None):
"""
Add or update the indexed document from a raw document source (not an
object).
"""
if self.disable_indexing:
return
kwargs = dict(index=self.index,
body=doc,
doc_type=doc_type,
id=id)
if parent:
kwargs['parent'] = parent
self.es.index(**kwargs)
@transactional
def delete_document(self, id, doc_type, parent=None, safe=False):
"""
Delete the indexed document based on a raw document source (not an
object).
"""
if self.disable_indexing:
return
kwargs = dict(index=self.index,
doc_type=doc_type,
id=id)
if parent:
kwargs['routing'] = parent
try:
self.es.delete(**kwargs)
except NotFoundError:
if not safe:
raise
def index_objects(self, objects):
"""
Add multiple objects to the index.
"""
for obj in objects:
self.index_object(obj)
def flush(self, force=True):
self.es.indices.flush(force=force)
def get(self, obj, routing=None):
"""
Retrieve the ES source document for a given object or (document type,
id) pair.
"""
if isinstance(obj, tuple):
doc_type, doc_id = obj
else:
doc_type, doc_id = obj.__class__.__name__, obj.id
if obj.elastic_parent:
routing = obj.elastic_parent
kwargs = dict(index=self.index,
doc_type=doc_type,
id=doc_id)
if routing:
kwargs['routing'] = routing
r = self.es.get(**kwargs)
return ElasticResultRecord(r)
def refresh(self):
"""
Refresh the ES index.
"""
self.es.indices.refresh(index=self.index)
def subtype_names(self, cls):
"""
Return a list of document types to query given an object class.
"""
classes = [cls] + [m.class_ for m in
cls.__mapper__._inheriting_mappers]
return [c.__name__ for c in classes
if hasattr(c, "elastic_mapping")]
def search(self, body, classes=None, fields=None, **query_params):
"""
Run ES search using default indexes.
"""
doc_types = classes and list(chain.from_iterable(
[doc_type] if isinstance(doc_type, six.string_types) else
self.subtype_names(doc_type)
for doc_type in classes))
if fields:
query_params['fields'] = fields
return self.es.search(index=self.index,
doc_type=','.join(doc_types),
body=body,
**query_params)
def query(self, *classes, **kw):
"""
Return an ElasticQuery against the specified class.
"""
cls = kw.pop('cls', ElasticQuery)
return cls(client=self, classes=classes, **kw)
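# Illustrative usage sketch (editor's addition, not part of the upstream
# package): index a raw document against a local node with transactions
# disabled.  The server address, index name, document type and body are
# assumptions made up for the example.
def _example_client_usage():  # pragma: no cover
    client = ElasticClient(servers=['localhost:9200'],
                           index='example-index',
                           use_transaction=False)
    client.ensure_index()
    client.index_document(id=1,
                          doc_type='Article',
                          doc={'title': 'Hello', 'body': 'World'})
    client.refresh()
    return client.search({'query': {'match': {'title': 'Hello'}}},
                         classes=['Article'])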
| {
"repo_name": "storborg/pyramid_es",
"path": "pyramid_es/client.py",
"copies": "1",
"size": "12207",
"license": "mit",
"hash": 6016018524733611000,
"line_mean": 29.2903225806,
"line_max": 77,
"alpha_frac": 0.5360858524,
"autogenerated": false,
"ratio": 4.272663633181659,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 403
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import abc
import itertools
import os
import time
from binascii import hexlify
from datetime import datetime
from collections import MutableMapping
from itsdangerous import BadSignature
from .compat import to_native_str
log = logging.getLogger('gimlet')
# Used by :meth:`Session.get` to detect when no options are explicitly
# passed.
DEFAULT = object()
class Session(MutableMapping):
"""Abstract front end for multiple session channels."""
# Subclasses need to define all of these
backend = abc.abstractproperty
channel_names = abc.abstractproperty
channel_opts = abc.abstractproperty
defaults = abc.abstractproperty
serializer = abc.abstractproperty
def __init__(self, request):
self.request = request
self.flushed = False
channels = {}
for key in self.channel_names:
channels[key] = self.read_channel(key)
self.channels = channels
self.has_backend = all(
(ch.backend is not None) for ch in channels.values())
if hasattr(request, 'add_response_callback'):
request.add_response_callback(self.write_callback)
@property
def default_channel(self):
return self.channels['perm']
@property
def id(self):
return self.default_channel.id
@property
def created_timestamp(self):
return self.default_channel.created_timestamp
@property
def created_time(self):
return self.default_channel.created_time
def write_callback(self, request, response):
self.flushed = True
for key in self.channels:
self.write_channel(request, response, key, self.channels[key])
def response_callback(self, request, response):
        # This is a no-op, but exists for compatibility with previous
        # versions of gimlet that did not implicitly add the write callback.
pass
def __getitem__(self, key):
"""Get value for ``key`` from the first channel it's found in."""
for channel in self.channels.values():
try:
return channel.get(key)
except KeyError:
pass
raise KeyError(key)
def _check_options(self, permanent, clientside):
# If no backend is present, don't allow explicitly setting a key as
# non-clientside.
if (not self.has_backend) and (clientside is False):
raise ValueError('setting a non-clientside key with no backend '
'present is not supported')
if permanent is None:
permanent = self.defaults['permanent']
if clientside is None:
clientside = self.defaults['clientside']
if self.flushed and clientside:
raise ValueError('clientside keys cannot be set after the WSGI '
'response has been returned')
if permanent:
channel_key = 'perm'
else:
channel_key = 'nonperm'
return self.channels[channel_key], clientside
def get(self, key, default=None, permanent=DEFAULT, clientside=DEFAULT):
"""Get value for ``key`` or ``default`` if ``key`` isn't present.
When no options are passed, this behaves like `[]`--it will return
the value for ``key`` from the first channel it's found in.
On the other hand, if *any* option is specified, this will check
*all* of the options, set defaults for those that aren't passed,
then try to get the value from a specific channel.
In either case, if ``key`` isn't present, the ``default`` value is
returned, just like a normal ``dict.get()``.
"""
options = permanent, clientside
if all(opt is DEFAULT for opt in options):
action = lambda: self[key]
else:
options = (opt if opt is not DEFAULT else None for opt in options)
channel, clientside = self._check_options(*options)
action = lambda: channel.get(key, clientside=clientside)
try:
return action()
except KeyError:
return default
def __setitem__(self, key, val):
return self.set(key, val)
def set(self, key, val, permanent=None, clientside=None):
if key in self:
del self[key]
channel, clientside = self._check_options(permanent, clientside)
channel.set(key, val, clientside=clientside)
# If the response has already been flushed, we need to explicitly
# persist this set to the backend.
if self.flushed:
channel.backend_write()
def save(self, permanent=None, clientside=None):
channel, clientside = self._check_options(permanent, clientside)
if clientside:
channel.client_dirty = True
else:
channel.backend_dirty = True
def __delitem__(self, key):
if key not in self:
raise KeyError(key)
for channel in self.channels.values():
if key in channel:
channel.delete(key)
def __contains__(self, key):
return any((key in channel) for channel in self.channels.values())
def __iter__(self):
return itertools.chain(*[iter(ch) for ch in self.channels.values()])
def __len__(self):
return sum([len(ch) for ch in self.channels.values()])
def is_permanent(self, key):
return key in self.channels.get('perm', {})
def __repr__(self):
keys = '\n'.join(["-- %s --\n%r" % (k, v) for k, v in
self.channels.items()])
return "<Session \n%s\n>" % keys
def make_session_id(self):
return hexlify(os.urandom(16))
def read_channel(self, key):
name = self.channel_names[key]
if name in self.request.cookies:
try:
id, created_timestamp, client_data = \
self.serializer.loads(self.request.cookies[name])
except BadSignature as e:
                log.warning('Request from %s contained bad sig. %s',
self.request.remote_addr, e)
return self.fresh_channel()
else:
return SessionChannel(id, created_timestamp, self.backend,
fresh=False, client_data=client_data)
else:
return self.fresh_channel()
def write_channel(self, req, resp, key, channel):
name = self.channel_names[key]
# Set a cookie IFF the following conditions:
# - data has been changed on the client
# OR
# - the cookie is fresh
if channel.client_dirty or channel.fresh:
resp.set_cookie(name,
self.serializer.dumps(channel),
httponly=True,
secure=req.scheme == 'https',
**self.channel_opts[key])
# Write to the backend IFF the following conditions:
# - data has been changed on the backend
if channel.backend_dirty:
channel.backend_write()
def fresh_channel(self):
return SessionChannel(
self.make_session_id(), int(time.time()), self.backend, fresh=True)
def invalidate(self):
self.clear()
for key in self.channels:
self.channels[key] = self.fresh_channel()
# Flash & CSRF methods taken directly from pyramid_beaker.
# These are part of the Pyramid Session API.
def flash(self, msg, queue='', allow_duplicate=True):
storage = self.setdefault('_f_' + queue, [])
if allow_duplicate or (msg not in storage):
storage.append(msg)
def pop_flash(self, queue=''):
storage = self.pop('_f_' + queue, [])
return storage
def peek_flash(self, queue=''):
storage = self.get('_f_' + queue, [])
return storage
def new_csrf_token(self):
token = to_native_str(hexlify(os.urandom(20)))
self['_csrft_'] = token
return token
def get_csrf_token(self):
token = self.get('_csrft_', None)
if token is None:
token = self.new_csrf_token()
return token
class SessionChannel(object):
def __init__(self, id, created_timestamp, backend, fresh,
client_data=None):
self.dirty_keys = set()
self.id = id
self.created_timestamp = created_timestamp
self.backend = backend
self.fresh = fresh
self.client_data = client_data or {}
self.client_dirty = False
self.backend_data = {}
self.backend_dirty = False
self.backend_loaded = False
def backend_read(self):
if (not self.backend_loaded) and (self.backend is not None):
try:
self.backend_data = self.backend[self.id]
except KeyError:
self.backend_data = {}
self.backend_loaded = True
def backend_write(self):
self.backend[self.id] = self.backend_data
@property
def created_time(self):
return datetime.utcfromtimestamp(self.created_timestamp)
def __iter__(self):
self.backend_read()
return itertools.chain(iter(self.client_data), iter(self.backend_data))
def __len__(self):
self.backend_read()
return len(self.backend_data) + len(self.client_data)
def get(self, key, clientside=None):
if ((clientside is None) and (key in self.client_data)) or clientside:
return self.client_data[key]
else:
self.backend_read()
return self.backend_data[key]
def set(self, key, value, clientside=None):
if clientside:
self.client_data[key] = value
self.client_dirty = True
else:
self.backend_data[key] = value
self.backend_dirty = True
def delete(self, key):
if key in self.client_data:
del self.client_data[key]
self.client_dirty = True
else:
self.backend_read()
del self.backend_data[key]
self.backend_dirty = True
def __repr__(self):
self.backend_read()
return ("id %s\ncreated %s\nbackend %r\nclient %r" %
(self.id, self.created_time, self.backend_data,
self.client_data))
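# Minimal sketch (illustrative only) of how a single SessionChannel splits
# data between the client-side cookie payload and a server-side backend; a
# plain dict stands in for the backend here purely for demonstration.
def _example_channel_usage():
    backend = {}
    channel = SessionChannel('abc123', int(time.time()), backend, fresh=True)
    channel.set('theme', 'dark', clientside=True)     # kept in the cookie payload
    channel.set('cart', [1, 2, 3], clientside=False)  # kept in the backend
    channel.backend_write()
    # Reads fall through from client data to backend data transparently.
    return channel.get('theme'), channel.get('cart'), backend['abc123']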
| {
"repo_name": "storborg/gimlet",
"path": "gimlet/session.py",
"copies": "1",
"size": "10504",
"license": "mit",
"hash": -782994047468771200,
"line_mean": 31.4197530864,
"line_max": 79,
"alpha_frac": 0.5864432597,
"autogenerated": false,
"ratio": 4.262987012987013,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5349430272687012,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import copy
from functools import wraps
from collections import OrderedDict
import six
from .result import ElasticResult
log = logging.getLogger(__name__)
ARBITRARILY_LARGE_SIZE = 100000
def generative(f):
"""
A decorator to wrap query methods to make them automatically generative.
"""
@wraps(f)
def wrapped(self, *args, **kwargs):
self = self._generate()
f(self, *args, **kwargs)
return self
return wrapped
def filters(f):
"""
A convenience decorator to wrap query methods that are adding filters. To
use, simply make a method that returns a filter dict in elasticsearch's
JSON object format.
Should be used inside @generative (listed after in decorator order).
"""
@wraps(f)
def wrapped(self, *args, **kwargs):
val = f(self, *args, **kwargs)
self.filters.append(val)
return wrapped
class ElasticQuery(object):
"""
Represents a query to be issued against the ES backend.
"""
def __init__(self, client, classes=None, q=None):
if not q:
q = self.match_all_query()
elif isinstance(q, six.string_types):
q = self.text_query(q, operator='and')
self.base_query = q
self.client = client
self.classes = classes
self.filters = []
self.suggests = {}
self.sorts = OrderedDict()
self.facets = {}
self._size = None
self._start = None
def _generate(self):
s = self.__class__.__new__(self.__class__)
s.__dict__ = self.__dict__.copy()
s.filters = list(s.filters)
s.suggests = s.suggests.copy()
s.sorts = s.sorts.copy()
s.facets = s.facets.copy()
return s
@staticmethod
def match_all_query():
"""
        Static method to return a query dict which matches every document. Can
be overridden in a subclass to customize behavior.
"""
return {
'match_all': {}
}
@staticmethod
def text_query(phrase, operator="and"):
"""
        Static method to return a query dict matching a full-text search. Can be
overridden in a subclass to customize behavior.
"""
return {
"match": {
'_all': {
"query": phrase,
"operator": operator,
"analyzer": "content"
}
}
}
@generative
@filters
def filter_term(self, term, value):
"""
Filter for documents where the field ``term`` matches ``value``.
"""
return {'term': {term: value}}
@generative
@filters
def filter_terms(self, term, value):
"""
Filter for documents where the field ``term`` matches one of the
elements in ``value`` (which should be a sequence).
"""
return {'terms': {term: value}}
@generative
@filters
def filter_value_upper(self, term, upper):
"""
        Filter for documents where the field ``term`` is less than or equal to ``upper``.
"""
return {'range': {term: {'to': upper, 'include_upper': True}}}
@generative
@filters
def filter_value_lower(self, term, lower):
"""
        Filter for documents where the field ``term`` is greater than or equal to ``lower``.
"""
return {'range': {term: {'from': lower, 'include_lower': True}}}
@generative
@filters
def filter_has_parent_term(self, parent_type, term, value):
return {
'has_parent': {
'parent_type': parent_type,
'query': {
'term': {
term: value,
}
}
}
}
@generative
def order_by(self, key, desc=False):
"""
        Sort results by the field ``key``. Defaults to ascending order unless
``desc`` is True.
"""
order = "desc" if desc else "asc"
self.sorts['order_by_%s' % key] = {key: {"order": order}}
@generative
def add_facet(self, facet):
"""
Add a query facet, to return data used for the implementation of
faceted search (e.g. returning result counts for given possible
sub-queries).
The facet should be supplied as a dict in the format that ES uses for
representation.
It is recommended to use the helper methods ``add_term_facet()`` or
``add_range_facet()`` where possible.
"""
self.facets.update(facet)
def add_term_facet(self, name, size, field):
"""
Add a term facet.
ES will return data about document counts for the top sub-queries (by
document count) in which the results are filtered by a given term.
"""
return self.add_facet({
name: {
'terms': {
'field': field,
'size': size
}
}
})
def add_range_facet(self, name, field, ranges):
"""
Add a range facet.
        ES will return data about document counts for the top sub-queries (by
        document count) in which the results are filtered by a given numerical
range.
"""
return self.add_facet({
name: {
'range': {
'field': field,
'ranges': ranges,
}
}
})
@generative
def add_term_suggester(self, name, field, text, sort='score',
suggest_mode='missing'):
self.suggests[name] = {
'text': text,
'term': {
'field': field,
'sort': sort,
'suggest_mode': suggest_mode,
}
}
@generative
def offset(self, n):
"""
When returning results, start at document ``n``.
"""
if self._start is not None:
raise ValueError('This query already has an offset applied.')
self._start = n
start = offset
@generative
def limit(self, n):
"""
When returning results, stop at document ``n``.
"""
if self._size is not None:
raise ValueError('This query already has a limit applied.')
self._size = n
size = limit
def _search(self, start=None, size=None, fields=None):
q = copy.copy(self.base_query)
if self.filters:
f = {'and': self.filters}
q = {
'filtered': {
'filter': f,
'query': q,
}
}
q_start = self._start or 0
q_size = self._size or ARBITRARILY_LARGE_SIZE
if size is not None:
q_size = max(0,
size if q_size is None else
min(size, q_size - q_start))
if start is not None:
q_start = q_start + start
body = {
'sort': list(self.sorts.values()),
'query': q
}
if self.facets:
body['facets'] = self.facets
if self.suggests:
body['suggest'] = self.suggests
return self.client.search(body, classes=self.classes, fields=fields,
size=q_size, from_=q_start)
def execute(self, start=None, size=None, fields=None):
"""
Execute this query and return a result set.
"""
return ElasticResult(self._search(start=start, size=size,
fields=fields))
def count(self):
"""
Execute this query to determine the number of documents that would be
returned, but do not actually fetch documents. Returns an int.
"""
res = self._search(size=0)
return res['hits']['total']
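# Illustrative sketch: the @generative decorator copies the query before each
# mutation, so chained calls return modified copies and never mutate the
# original. Nothing is executed here, so no Elasticsearch connection is
# needed and ``client`` can simply be None.
def _example_generative_chaining():
    q1 = ElasticQuery(client=None)
    q2 = q1.filter_term('category', 'news').order_by('title', desc=True)
    assert q1.filters == [] and len(q2.filters) == 1
    assert 'order_by_title' in q2.sorts and len(q1.sorts) == 0
    return q2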
| {
"repo_name": "storborg/pyramid_es",
"path": "pyramid_es/query.py",
"copies": "1",
"size": "8029",
"license": "mit",
"hash": -3244039409924313600,
"line_mean": 26.9756097561,
"line_max": 78,
"alpha_frac": 0.5166272263,
"autogenerated": false,
"ratio": 4.351761517615176,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5368388743915177,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import pprint as pp
from collections import OrderedDict
from .utils import sorted_unique
class Facet(object):
def __init__(self, data, is_wrap, rowvar=None, colvar=None, nrow=None, ncol=None, scales=None):
self.rowvar = rowvar
self.colvar = colvar
self.is_wrap = is_wrap
self.nrow = nrow
self.ncol = ncol
self.facet_map = OrderedDict()
self.scales = scales
# if it's a facet_wrap, figure out how many rows and columns there should be
# assign subplot indices to rowvars and columnvars
self.ndim = ndim = self.calculate_ndimensions(data, rowvar, colvar)
if is_wrap==True:
if self.nrow:
self.ncol = ncol = int(math.ceil(ndim / float(self.nrow)))
self.nrow = nrow = int(self.nrow)
elif self.ncol:
self.nrow = nrow = int(math.ceil(ndim / float(self.ncol)))
self.ncol = ncol = int(self.ncol)
else:
self.nrow = nrow = int(math.ceil(math.sqrt(ndim)))
self.ncol = ncol = int(math.ceil(ndim / math.ceil(math.sqrt(ndim))))
else:
if rowvar:
self.nrow = nrow = data[rowvar].nunique()
else:
self.nrow = nrow = 1
if colvar:
self.ncol = ncol = data[colvar].nunique()
else:
self.ncol = ncol = 1
facet_values = self.generate_subplot_index(data, rowvar, colvar)
for row in range(nrow):
for col in range(ncol):
try:
value = next(facet_values)
except Exception as e:
continue
if ncol==1:
self.facet_map[value] = (row, None)
elif nrow==1:
self.facet_map[value] = (None, col)
else:
self.facet_map[value] = (row, col)
def generate_subplot_index(self, data, rowvar, colvar):
if rowvar and colvar:
for row in sorted_unique(data[rowvar]):
for col in sorted_unique(data[colvar]):
yield (row, col)
elif rowvar:
for row in sorted_unique(data[rowvar]):
yield row
elif colvar:
for col in sorted_unique(data[colvar]):
yield col
def calculate_ndimensions(self, data, rowvar, colvar):
if rowvar and colvar:
return data[rowvar].nunique() * data[colvar].nunique()
elif rowvar:
return data[rowvar].nunique()
elif colvar:
return data[colvar].nunique()
else:
raise Exception("No row or column specified to facet on!")
@property
def facet_cols(self):
cols = []
if self.rowvar:
cols.append(self.rowvar)
if self.colvar:
cols.append(self.colvar)
return cols
class facet_wrap(object):
"""
Wrap panels from x and (optionally) y variables to create subplots.
Parameters
-----------
x:
x facet
y:
y facet
nrow:
number of rows in your final plot
ncol:
number of columns in your final plot
scales:
how individual panels x and y axes will be scaled. options are:
"free" - x and y axis are different for each panel
"free_y" - panels have same x axis but different y axis scales
"free_x" - panels have same y axis but different x axis scales
"fixed" - all panels are the same
Examples
--------
"""
def __init__(self, x=None, y=None, nrow=None, ncol=None, scales=None):
self.x_var = x
self.y_var = y
self.nrow = nrow
self.ncol = ncol
self.scales = scales
def __radd__(self, gg):
if gg.__class__.__name__=="ggplot":
gg.facets = Facet(gg.data, True, self.x_var, self.y_var, nrow=self.nrow, ncol=self.ncol, scales=self.scales)
return gg
return self
class facet_grid(object):
"""
Layout panels from x and (optionally) y variables in a grid format.
Parameters
-----------
x:
x facet
y:
y facet
scales:
how individual panels x and y axes will be scaled. options are:
"free" - x and y axis are different for each panel
"free_y" - panels have same x axis but different y axis scales
"free_x" - panels have same y axis but different x axis scales
"fixed" - all panels are the same
Examples
--------
"""
def __init__(self, x=None, y=None, scales=None):
self.x_var = x
self.y_var = y
self.scales = scales
def __radd__(self, gg):
if gg.__class__.__name__=="ggplot":
gg.facets = Facet(gg.data, False, self.x_var, self.y_var, scales=self.scales)
return gg
return self
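# Illustrative sketch of how Facet assigns (row, col) subplot positions for a
# wrapped facet; the toy DataFrame and column name are placeholders.
def _example_facet_layout():
    import pandas as pd
    df = pd.DataFrame({'cut': ['a', 'a', 'b', 'b', 'c', 'c'], 'x': range(6)})
    facet = Facet(df, is_wrap=True, rowvar='cut')
    # Three facet values are laid out on a 2x2 grid: 'a'->(0, 0), 'b'->(0, 1),
    # 'c'->(1, 0); the last cell stays empty.
    return facet.nrow, facet.ncol, dict(facet.facet_map)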
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/facets.py",
"copies": "1",
"size": "5188",
"license": "bsd-2-clause",
"hash": -2323080095445705700,
"line_mean": 31.2236024845,
"line_max": 120,
"alpha_frac": 0.5381649961,
"autogenerated": false,
"ratio": 3.803519061583578,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4841684057683578,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
from cycler import cycler
from .theme import theme_base
class theme_gray(theme_base):
"""
Standard theme for ggplot. Gray background w/ white gridlines.
    Copied from the ggplot2 codebase:
https://github.com/hadley/ggplot2/blob/master/R/theme-defaults.r
"""
def __init__(self):
super(theme_gray, self).__init__()
self._rcParams["timezone"] = "UTC"
self._rcParams["lines.linewidth"] = "1.0"
self._rcParams["lines.antialiased"] = "True"
self._rcParams["patch.linewidth"] = "0.5"
self._rcParams["patch.facecolor"] = "348ABD"
self._rcParams["patch.edgecolor"] = "#E5E5E5"
self._rcParams["patch.antialiased"] = "True"
self._rcParams["font.family"] = "sans-serif"
self._rcParams["font.size"] = "12.0"
self._rcParams["font.serif"] = ["Times", "Palatino",
"New Century Schoolbook",
"Bookman", "Computer Modern Roman",
"Times New Roman"]
self._rcParams["font.sans-serif"] = ["Helvetica", "Avant Garde",
"Computer Modern Sans serif",
"Arial"]
self._rcParams["axes.facecolor"] = "#E5E5E5"
self._rcParams["axes.edgecolor"] = "bcbcbc"
self._rcParams["axes.linewidth"] = "1"
self._rcParams["axes.grid"] = "True"
self._rcParams["axes.titlesize"] = "x-large"
self._rcParams["axes.labelsize"] = "large"
self._rcParams["axes.labelcolor"] = "black"
self._rcParams["axes.axisbelow"] = "True"
self._rcParams["axes.prop_cycle"] = cycler('color', ["#333333", "#348ABD", "#7A68A6",
"#A60628", "#467821", "#CF4457", "#188487", "#E24A33"])
self._rcParams["grid.color"] = "white"
self._rcParams["grid.linewidth"] = "1.4"
self._rcParams["grid.linestyle"] = "solid"
self._rcParams["xtick.major.size"] = "0"
self._rcParams["xtick.minor.size"] = "0"
self._rcParams["xtick.major.pad"] = "6"
self._rcParams["xtick.minor.pad"] = "6"
self._rcParams["xtick.color"] = "#7F7F7F"
self._rcParams["xtick.direction"] = "out" # pointing out of axis
self._rcParams["ytick.major.size"] = "0"
self._rcParams["ytick.minor.size"] = "0"
self._rcParams["ytick.major.pad"] = "6"
self._rcParams["ytick.minor.pad"] = "6"
self._rcParams["ytick.color"] = "#7F7F7F"
self._rcParams["ytick.direction"] = "out" # pointing out of axis
self._rcParams["legend.fancybox"] = "True"
self._rcParams["figure.figsize"] = "11, 8"
self._rcParams["figure.facecolor"] = "1.0"
self._rcParams["figure.edgecolor"] = "0.50"
self._rcParams["figure.subplot.hspace"] = "0.5"
# TODO: this slows down everything for some reason
# self._rcParams["text.usetex"] = "True"
def apply_final_touches(self, ax):
'''Styles x,y axes to appear like ggplot2
Must be called after all plot and axis manipulation operations have
been carried out (needs to know final tick spacing)
From: https://github.com/wrobstory/climatic/blob/master/climatic/stylers.py
'''
#Remove axis border
for child in ax.get_children():
if isinstance(child, mpl.spines.Spine):
child.set_alpha(0)
#Restyle the tick lines
for line in ax.get_xticklines() + ax.get_yticklines():
line.set_markersize(5)
line.set_markeredgewidth(1.4)
#Only show bottom left ticks
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
#Set minor grid lines
ax.grid(True, 'minor', color='#F2F2F2', linestyle='-', linewidth=0.7)
if not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
if not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.LogLocator):
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator(2))
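# Usage sketch (illustrative): a theme is essentially a bag of rcParams plus
# post-plot touch-ups. This pushes the gray theme's settings into matplotlib,
# draws a simple line, and then applies the axis styling above. It relies only
# on attributes defined in this module.
def _example_apply_theme():
    import matplotlib.pyplot as plt
    t = theme_gray()
    mpl.rcParams.update(t._rcParams)
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    t.apply_final_touches(ax)
    return fig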
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/themes/theme_gray.py",
"copies": "1",
"size": "4342",
"license": "bsd-2-clause",
"hash": -8330410893780642000,
"line_mean": 45.688172043,
"line_max": 100,
"alpha_frac": 0.5702441271,
"autogenerated": false,
"ratio": 3.4625199362041466,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9517638053333544,
"avg_score": 0.0030252019941205428,
"num_lines": 93
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
from .geom import geom
import numpy as np
class geom_point(geom):
DEFAULT_AES = {'alpha': 1, 'color': 'black', 'fill': None,
'shape': 'o', 'size': 20}
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity', 'cmap':None}
_aes_renames = {'size': 's', 'shape': 'marker', 'fill': 'facecolor'}
_units = {'alpha', 'marker'}
def _plot_unit(self, pinfo, ax):
fc = pinfo['facecolor']
if fc is None:
# default to color
pinfo['facecolor'] = pinfo['color']
elif fc is False:
            # matplotlib expects an empty string instead of False here
pinfo['facecolor'] = ''
# for some reason, scatter doesn't default to the same color styles
# as the axes.color_cycle
if "color" not in pinfo and self.params['cmap'] is None:
pinfo["color"] = mpl.rcParams.get("axes.color_cycle", ["#333333"])[0]
if self.params['position'] == 'jitter':
pinfo['x'] *= np.random.uniform(.9, 1.1, len(pinfo['x']))
pinfo['y'] *= np.random.uniform(.9, 1.1, len(pinfo['y']))
ax.scatter(**pinfo)
| {
"repo_name": "bitemyapp/ggplot",
"path": "ggplot/geoms/geom_point.py",
"copies": "12",
"size": "1296",
"license": "bsd-2-clause",
"hash": 6871380385975002000,
"line_mean": 35,
"line_max": 81,
"alpha_frac": 0.5516975309,
"autogenerated": false,
"ratio": 3.5604395604395602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.029625556012944095,
"num_lines": 36
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import matplotlib.pyplot as plt
from copy import copy, deepcopy
from cycler import cycler
from .theme_gray import theme_gray  # theme_bw below subclasses theme_gray
class theme(object):
def __init__(self):
self._rcParams = {}
def __radd__(self, other):
if other.__class__.__name__=="ggplot":
other.theme = self
return other
return self
def get_rcParams(self):
return self._rcParams
def apply_final_touches(self, ax):
pass
class theme_bw(theme_gray):
"""
White background w/ black gridlines
"""
def __init__(self):
super(theme_bw, self).__init__()
self._rcParams['axes.facecolor'] = 'white'
class theme_xkcd(theme):
"""
xkcd theme
    The theme internally uses the settings from pyplot.xkcd().
"""
def __init__(self, scale=1, length=100, randomness=2):
super(theme_xkcd, self).__init__()
with plt.xkcd(scale=scale, length=length, randomness=randomness):
_xkcd = mpl.rcParams.copy()
        # no need to get a deprecation warning for nothing...
for key in mpl._deprecated_map:
if key in _xkcd:
del _xkcd[key]
if 'tk.pythoninspect' in _xkcd:
del _xkcd['tk.pythoninspect']
self._rcParams.update(_xkcd)
def __deepcopy__(self, memo):
class _empty(object):
pass
result = _empty()
result.__class__ = self.__class__
result.__dict__["_rcParams"] = {}
for k, v in self._rcParams.items():
try:
result.__dict__["_rcParams"][k] = deepcopy(v, memo)
except NotImplementedError:
                # deepcopy raises an error for objects that are derived from or
                # composed of matplotlib.transform.TransformNode.
                # Not desirable, but probably requires an upstream fix.
                # In particular, XKCD uses matplotlib.patheffects.withStroke
# -gdowding
result.__dict__["_rcParams"][k] = copy(v)
return result
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/themes/themes.py",
"copies": "1",
"size": "2119",
"license": "bsd-2-clause",
"hash": 7144213200170780000,
"line_mean": 29.2714285714,
"line_max": 78,
"alpha_frac": 0.5578102879,
"autogenerated": false,
"ratio": 4.082851637764932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0023764151358970967,
"num_lines": 70
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib as mpl
import matplotlib.pyplot as plt
from copy import copy, deepcopy
from .theme import theme_base
class theme_xkcd(theme_base):
"""
xkcd theme
    The theme internally uses the settings from pyplot.xkcd().
"""
def __init__(self, scale=1, length=100, randomness=2):
super(theme_xkcd, self).__init__()
with plt.xkcd(scale=scale, length=length, randomness=randomness):
_xkcd = mpl.rcParams.copy()
        # no need to get a deprecation warning for nothing...
for key in mpl._deprecated_map:
if key in _xkcd:
del _xkcd[key]
if 'tk.pythoninspect' in _xkcd:
del _xkcd['tk.pythoninspect']
self._rcParams.update(_xkcd)
def __deepcopy__(self, memo):
class _empty(object):
pass
result = _empty()
result.__class__ = self.__class__
result.__dict__["_rcParams"] = {}
for k, v in self._rcParams.items():
try:
result.__dict__["_rcParams"][k] = deepcopy(v, memo)
except NotImplementedError:
                # deepcopy raises an error for objects that are derived from or
                # composed of matplotlib.transform.TransformNode.
                # Not desirable, but probably requires an upstream fix.
                # In particular, XKCD uses matplotlib.patheffects.withStroke
# -gdowding
result.__dict__["_rcParams"][k] = copy(v)
return result
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/themes/theme_xkcd.py",
"copies": "1",
"size": "1582",
"license": "bsd-2-clause",
"hash": -3912748430783926300,
"line_mean": 36.6666666667,
"line_max": 78,
"alpha_frac": 0.5676359039,
"autogenerated": false,
"ratio": 4.152230971128609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5219866875028608,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.cbook as cbook
import numpy as np
import pandas as pd
import datetime
def format_ticks(ticks):
are_ints = True
for t in ticks:
try:
if int(t)!=t:
are_ints = False
except:
return ticks
if are_ints==True:
return [int(t) for t in ticks]
return ticks
def is_sequence_of_strings(obj):
"""
Returns true if *obj* is iterable and contains strings
"""
# Note: cbook.is_sequence_of_strings has a bug because
# a numpy array of strings is recognized as being
# string_like and therefore not a sequence of strings
if not cbook.iterable(obj):
return False
if not isinstance(obj, np.ndarray) and cbook.is_string_like(obj):
return False
for o in obj:
if not cbook.is_string_like(o):
return False
return True
def is_sequence_of_booleans(obj):
"""
Return True if *obj* is array-like and contains boolean values
"""
if not cbook.iterable(obj):
return False
_it = (isinstance(x, bool) for x in obj)
if all(_it):
return True
return False
def is_categorical(obj):
"""
Return True if *obj* is array-like and has categorical values
Categorical values include:
- strings
- booleans
"""
try:
float(obj.iloc[0])
return False
except:
return True
if is_sequence_of_strings(obj):
return True
if is_sequence_of_booleans(obj):
return True
return False
def is_iterable(obj):
try:
iter(obj)
return True
except:
return False
date_types = (
pd.tslib.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
datetime.datetime,
datetime.time
)
def is_date(x):
return isinstance(x, date_types)
def calc_n_bins(series):
"https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width"
q75, q25 = np.percentile(series, [75 , 25])
iqr = q75 - q25
h = (2 * iqr) / (len(series)**(1/3.))
k = (series.max() - series.min()) / h
return k
def sorted_unique(series):
"""Return the unique values of *series*, correctly sorted."""
# This handles Categorical data types, which sorted(series.unique()) fails
# on. series.drop_duplicates() is slower than Series(series.unique()).
return list(pd.Series(series.unique()).sort_values())
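# Quick illustrative sketch of the helpers above on toy pandas data; the
# values are placeholders.
def _example_helpers():
    cats = pd.Series(['a', 'b', 'b', 'a'])
    nums = pd.Series([3.0, 1.0, 2.0, 1.0])
    assert is_categorical(cats) and not is_categorical(nums)
    assert sorted_unique(cats) == ['a', 'b']
    # Freedman-Diaconis style estimate of how many bins a histogram needs.
    return calc_n_bins(nums)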
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/utils.py",
"copies": "1",
"size": "2517",
"license": "bsd-2-clause",
"hash": -1366573969233840000,
"line_mean": 23.2019230769,
"line_max": 78,
"alpha_frac": 0.6114421931,
"autogenerated": false,
"ratio": 3.7069219440353463,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48183641371353464,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
from copy import deepcopy
from .geom import geom
import pandas as pd
import numpy as np
from ggplot.components import smoothers
class stat_smooth(geom):
VALID_AES = ['x', 'y', 'color', 'alpha', 'label', 'se', 'linestyle', 'method', 'span', 'level', 'window']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
if 'x' in layer:
x = layer.pop('x')
if 'y' in layer:
y = layer.pop('y')
if 'se' in layer:
se = layer.pop('se')
else:
se = None
if 'span' in layer:
span = layer.pop('span')
else:
span = 2/3.
if 'window' in layer:
window = layer.pop('window')
else:
window = int(np.ceil(len(x) / 10.0))
if 'level' in layer:
level = layer.pop('level')
else:
level = 0.95
if 'method' in layer:
method = layer.pop('method')
else:
method = None
idx = np.argsort(x)
x = np.array(x)[idx]
y = np.array(y)[idx]
if method == "lm":
y, y1, y2 = smoothers.lm(x, y, 1-level)
elif method == "ma":
y, y1, y2 = smoothers.mavg(x, y, window=window)
else:
y, y1, y2 = smoothers.lowess(x, y, span=span)
plt.plot(x, y, **layer)
if se==True:
plt.fill_between(x, y1, y2, alpha=0.2, color="grey")
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/stat_smooth.py",
"copies": "1",
"size": "1664",
"license": "bsd-2-clause",
"hash": -7774066151986999000,
"line_mean": 29.8148148148,
"line_max": 109,
"alpha_frac": 0.5030048077,
"autogenerated": false,
"ratio": 3.3821138211382116,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9371754832141082,
"avg_score": 0.0026727593394260058,
"num_lines": 54
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
from .geom import geom
from scipy.stats import gaussian_kde
import numpy as np
class geom_density(geom):
VALID_AES = ['x', 'color', 'alpha', 'linestyle', 'fill', 'label']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
if 'x' in layer:
x = layer.pop('x')
else:
raise Exception("geom_density(): Need a aesthetic x mapping!")
if 'fill' in layer:
fill = layer.pop('fill')
else:
fill = None
try:
float(x[0])
except:
try:
# try to use it as a pandas.tslib.Timestamp
x = [ts.toordinal() for ts in x]
except:
raise Exception("geom_density(): aesthetic x mapping needs to be convertable to float!")
kde = gaussian_kde(x)
bottom = np.min(x)
top = np.max(x)
step = (top - bottom) / 1000.0
x = np.arange(bottom, top, step)
y = kde.evaluate(x)
plt.plot(x, y, **layer)
if fill:
plt.fill_between(x, y1=np.zeros(len(x)), y2=y, **layer)
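# Illustrative sketch of the core of plot_layer, standalone: fit a Gaussian
# KDE to the data and evaluate it on an evenly spaced grid of 1000 points.
# The random sample is a placeholder.
def _example_density_curve():
    data = np.random.randn(200)
    kde = gaussian_kde(data)
    step = (data.max() - data.min()) / 1000.0
    grid = np.arange(data.min(), data.max(), step)
    return grid, kde.evaluate(grid)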
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/geom_density.py",
"copies": "1",
"size": "1348",
"license": "bsd-2-clause",
"hash": -4577069524395204000,
"line_mean": 32.7,
"line_max": 113,
"alpha_frac": 0.5252225519,
"autogenerated": false,
"ratio": 3.6630434782608696,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9662395163089343,
"avg_score": 0.005174173414304993,
"num_lines": 40
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
from itertools import groupby
from operator import itemgetter
from .geom import geom
class geom_step(geom):
VALID_AES = ['x', 'y', 'color', 'alpha', 'linestyle', 'label', 'size',
'group']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
if 'x' in layer:
x = layer.pop('x')
if 'y' in layer:
y = layer.pop('y')
if 'size' in layer:
layer['markersize'] = layer['size']
del layer['size']
if 'linestyle' in layer and 'color' not in layer:
layer['color'] = 'k'
x_stepped = []
y_stepped = []
for i in range(len(x) - 1):
x_stepped.append(x[i])
x_stepped.append(x[i+1])
y_stepped.append(y[i])
y_stepped.append(y[i])
if 'group' not in layer:
plt.plot(x_stepped, y_stepped, **layer)
else:
g = layer.pop('group')
for k, v in groupby(sorted(zip(x_stepped, y_stepped, g), key=itemgetter(2)), key=itemgetter(2)):
x_g, y_g, _ = zip(*v)
plt.plot(x_g, y_g, **layer)
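# Illustrative sketch of the step expansion used in plot_layer: each interval
# contributes its left and right x with a repeated y, which produces the
# characteristic staircase when plotted.
def _example_step_points(x, y):
    x_stepped, y_stepped = [], []
    for i in range(len(x) - 1):
        x_stepped += [x[i], x[i + 1]]
        y_stepped += [y[i], y[i]]
    return x_stepped, y_stepped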
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/geom_step.py",
"copies": "1",
"size": "1362",
"license": "bsd-2-clause",
"hash": 7677359010303357000,
"line_mean": 33.9230769231,
"line_max": 108,
"alpha_frac": 0.5161527166,
"autogenerated": false,
"ratio": 3.396508728179551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9404312936078185,
"avg_score": 0.0016697017402733704,
"num_lines": 39
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import re
import six
def tex_escape(text):
"""
:param text: a plain text message
:return: the message escaped to appear correctly in LaTeX
"""
conv = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\{',
'}': r'\}',
'~': r'\textasciitilde{}',
'^': r'\^{}',
'\\': r'\textbackslash{}',
'<': r'\textless',
'>': r'\textgreater',
}
regex = re.compile('|'.join(re.escape(six.text_type(key)) for key in sorted(conv.keys(), key = lambda item: - len(item))))
return regex.sub(lambda match: conv[match.group()], text)
def color_legend(color):
# TODO: need outline on line
return plt.Line2D([0],[0], color=color, linewidth=5)
def size_legend(size):
return plt.Line2D([0],[0], color='black', marker='o', linestyle='None', markersize=size**.5)
def alpha_legend(alpha):
return plt.Line2D([0],[0], color='black', marker='o', linestyle='None', alpha=alpha)
def shape_legend(shape):
return plt.Line2D([0],[0], color='black', marker=shape, linestyle='None')
def linetype_legend(linetype):
return plt.Line2D([0],[0], color='black', linestyle=linetype)
def make_aesthetic_legend(aesthetic, value):
if aesthetic=='color':
return color_legend(value)
elif aesthetic=='fill':
return color_legend(value)
elif aesthetic=='size':
return size_legend(value)
elif aesthetic=='alpha':
return alpha_legend(value)
elif aesthetic=='shape':
return shape_legend(value)
elif aesthetic=='linetype':
return linetype_legend(value)
else:
print(aesthetic + " not found")
def make_legend(ax, legend_mapping):
    # TODO: for some reason this wreaks havoc! but this is also how you would do a bold legend :(
# plt.rc('text', usetex=True)
extra = Rectangle((0, 0), 0, 0, facecolor="w", fill=False, edgecolor='none', linewidth=0)
items = []
labels = []
for aesthetic in ['color', 'fill', 'shape', 'alpha', 'size', 'linetype']:
if aesthetic in legend_mapping:
items.append(extra)
colname = legend_mapping[aesthetic]['name']
# spacer = r'\n' if len(labels) > 0 else r''
spacer = '\n' if len(labels) > 0 else ''
# TODO: this is supposed to make the label bold
# labels.append(spacer + r'\textbf{' + colname + '}')
labels.append(spacer + colname)
for key in sorted(legend_mapping[aesthetic]['lookup'].keys()):
value = legend_mapping[aesthetic]['lookup'][key]
legend_item = make_aesthetic_legend(aesthetic, value)
items.append(legend_item)
labels.append(key)
legend = ax.legend(items, labels, loc='center left', bbox_to_anchor=(1.05, 0.5), fontsize='small', frameon=False)
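# Usage sketch (illustrative) of the nested dict structure make_legend
# expects: one entry per aesthetic, each holding the column name and a lookup
# from legend label to the drawn value. The column and label names below are
# placeholders.
def _example_legend():
    fig, ax = plt.subplots()
    legend_mapping = {
        'color': {'name': 'species', 'lookup': {'setosa': 'red', 'virginica': 'blue'}},
        'size': {'name': 'petal_width', 'lookup': {'small': 20, 'large': 80}},
    }
    make_legend(ax, legend_mapping)
    return fig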
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/legend.py",
"copies": "1",
"size": "3072",
"license": "bsd-2-clause",
"hash": -5832804939356563000,
"line_mean": 34.3103448276,
"line_max": 126,
"alpha_frac": 0.5807291667,
"autogenerated": false,
"ratio": 3.479048697621744,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9522951898806276,
"avg_score": 0.007365193103093732,
"num_lines": 87
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import Normalize
import numpy as np
from .geom import geom
class geom_point(geom):
VALID_AES = ['x', 'y', 'size', 'color', 'alpha', 'shape', 'label', 'cmap',
'position']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
if "size" in layer:
layer["s"] = layer["size"]
del layer["size"]
if "shape" in layer:
layer["marker"] = layer["shape"]
del layer["shape"]
# for some reason, scatter doesn't default to the same color styles
# as the axes.color_cycle
if "color" not in layer and "cmap" not in layer:
layer["color"] = mpl.rcParams.get("axes.color_cycle", ["#333333"])[0]
if "position" in layer:
del layer["position"]
layer['x'] *= np.random.uniform(.9, 1.1, len(layer['x']))
layer['y'] *= np.random.uniform(.9, 1.1, len(layer['y']))
plt.scatter(**layer)
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/geom_point.py",
"copies": "1",
"size": "1250",
"license": "bsd-2-clause",
"hash": 1238490531284855000,
"line_mean": 32.7837837838,
"line_max": 81,
"alpha_frac": 0.5632,
"autogenerated": false,
"ratio": 3.787878787878788,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48510787878787875,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
from .geom import geom
class geom_text(geom):
VALID_AES = ['label','x','y','alpha','angle','color','family','fontface',
'hjust','size','vjust']
REQUIRED_AES = ['label','x','y']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
# Check for required aesthetics
missing_aes = []
for required_aes in self.REQUIRED_AES:
if required_aes not in layer:
missing_aes.append(required_aes)
if len(missing_aes) > 0:
raise Exception(
"geom_text requires the following missing aesthetics: %s" %\
", ".join(missing_aes))
x = layer.pop('x')
y = layer.pop('y')
label = layer.pop('label')
# before taking max and min make sure x is not empty
if len(x) == 0:
return
# plt.text does not resize axes, must do manually
xmax = max(x)
xmin = min(x)
ymax = max(y)
ymin = min(y)
margin = 0.1
xmargin = (xmax - xmin) * margin
ymargin = (ymax - ymin) * margin
xmax = xmax + xmargin
xmin = xmin - xmargin
ymax = ymax + ymargin
ymin = ymin - ymargin
# Take current plotting dimension in account for the case that we
# work on a special dataframe just for this geom!
if not self.data is None:
ax = plt.gca()
cxmin, cxmax = ax.get_xlim()
cymin, cymax = ax.get_ylim()
# there is a problem if geom_text is the first plot, as
# then the dimension are 0-1 for all axis :-(
xmax = max(xmax, cxmax)
xmin = min(xmin, cxmin)
ymax = max(ymax, cymax)
ymin = min(ymin, cymin)
if 'hjust' in layer:
x = (np.array(x) + layer['hjust']).tolist()
del layer['hjust']
else:
layer['horizontalalignment'] = 'center'
if 'vjust' in layer:
y = (np.array(y) + layer['vjust']).tolist()
del layer['vjust']
else:
layer['verticalalignment'] = 'center'
if 'angle' in layer:
layer['rotation'] = layer['angle']
del layer['angle']
for x_g,y_g,s in zip(x,y,label):
plt.text(x_g,y_g,s,**layer)
# resize axes
plt.axis([xmin, xmax, ymin, ymax])
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/geom_text.py",
"copies": "1",
"size": "2674",
"license": "bsd-2-clause",
"hash": 274904704060330850,
"line_mean": 30.8333333333,
"line_max": 77,
"alpha_frac": 0.5258040389,
"autogenerated": false,
"ratio": 3.82,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48458040389,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from .geom import geom
import pandas as pd
import numpy as np
import scipy.stats as stats
class stat_function(geom):
"""
Superimpose a function onto a plot
    The function is evaluated at ``n`` evenly spaced points over the range of
    ``x`` and the resulting curve is drawn with matplotlib.
Parameters
----------
x : list, 1darray
x values of data
fun : function
Function to draw.
n : int
Number of points to interpolate over. Must be greater than zero.
Defaults to 101.
color : str
Color to draw function with.
args : list, dict, object
        List or dict of additional arguments to pass to the function. If it is
        neither a list nor a dict, the object is passed as the second argument.
Examples
--------
Sin vs cos.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
gg = ggplot(pd.DataFrame({'x':np.arange(10)}),aes(x='x'))
gg = gg + stat_function(fun=np.sin,color="red")
gg = gg + stat_function(fun=np.cos,color="blue")
print(gg)
Compare random sample density to normal distribution.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.normal(size=100)
# normal distribution function
def dnorm(n):
return (1.0 / np.sqrt(2 * np.pi)) * (np.e ** (-0.5 * (n ** 2)))
data = pd.DataFrame({'x':x})
gg = ggplot(aes(x='x'),data=data) + geom_density()
gg = gg + stat_function(fun=dnorm,n=150)
print(gg)
Passing additional arguments to function as list.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.randn(100)
to_the_power_of = lambda n, p: n ** p
y = x ** 3
y += np.random.randn(100) # add noise
data = pd.DataFrame({'x':x,'y':y})
gg = ggplot(aes(x='x',y='y'),data=data) + geom_point()
gg = gg + stat_function(fun=to_the_power_of,args=[3])
print(gg)
Passing additional arguments to function as dict.
.. plot::
:include-source:
import scipy
import numpy as np
import pandas as pd
from ggplot import *
def dnorm(x, mean, var):
return scipy.stats.norm(mean,var).pdf(x)
data = pd.DataFrame({'x':np.arange(-5,6)})
gg = ggplot(aes(x='x'),data=data)
gg = gg + stat_function(fun=dnorm,color="blue",args={'mean':0.0,'var':0.2})
gg = gg + stat_function(fun=dnorm,color="red",args={'mean':0.0,'var':1.0})
gg = gg + stat_function(fun=dnorm,color="yellow",args={'mean':0.0,'var':5.0})
gg = gg + stat_function(fun=dnorm,color="green",args={'mean':-2.0,'var':0.5})
print(gg)
"""
VALID_AES = ['x','fun','n','color','args']
REQUIRED_AES = ['x','fun']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
miss_aes = [aes for aes in self.REQUIRED_AES if aes not in layer]
if(miss_aes):
raise Exception("stat_function requires the following " +
"missing aesthetics: %s" % ", ".join(miss_aes))
x = layer.pop('x')
fun = layer.pop('fun')
if 'args' in layer:
args = layer.pop('args')
old_fun = fun
if isinstance(args,list):
fun = lambda x: old_fun(x,*args)
elif isinstance(args,dict):
fun = lambda x: old_fun(x,**args)
else:
                fun = lambda x: old_fun(x, args)
color = None if 'color' not in layer else layer.pop('color')
n = 101 if 'n' not in layer else layer.pop('n')
x_min = min(x)
x_max = max(x)
x_values = np.linspace(x_min,x_max,n)
y_values = list(map(fun,x_values))
if color:
plt.plot(x_values,y_values,color=color)
else:
plt.plot(x_values,y_values)
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/stat_function.py",
"copies": "1",
"size": "4266",
"license": "bsd-2-clause",
"hash": -6447286946856201000,
"line_mean": 29.9130434783,
"line_max": 85,
"alpha_frac": 0.5379746835,
"autogenerated": false,
"ratio": 3.5285359801488836,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45665106636488834,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .geom import geom
from pandas.lib import Timestamp
class geom_bar(geom):
VALID_AES = ['x', 'color', 'alpha', 'fill', 'label', 'weight', 'position']
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
x = layer.pop('x')
if 'weight' not in layer:
counts = pd.value_counts(x)
labels = counts.index.tolist()
weights = counts.tolist()
else:
# TODO: pretty sure this isn't right
weights = layer.pop('weight')
if not isinstance(x[0], Timestamp):
labels = x
else:
df = pd.DataFrame({'weights':weights, 'timepoint': pd.to_datetime(x)})
df = df.set_index('timepoint')
ts = pd.TimeSeries(df.weights, index=df.index)
ts = ts.resample('W', how='sum')
ts = ts.fillna(0)
weights = ts.values.tolist()
labels = ts.index.to_pydatetime().tolist()
indentation = np.arange(len(labels)) + 0.2
width = 0.9
idx = np.argsort(labels)
labels, weights = np.array(labels)[idx], np.array(weights)[idx]
labels = sorted(labels)
if 'color' in layer:
layer['edgecolor'] = layer['color']
del layer['color']
else:
layer['edgecolor'] = '#333333'
if 'fill' in layer:
layer['color'] = layer['fill']
del layer['fill']
else:
layer['color'] = '#333333'
plt.bar(indentation, weights, width, **layer)
plt.autoscale()
return [
{"function": "set_xticks", "args": [indentation+width/2]},
{"function": "set_xticklabels", "args": [labels]}
]
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/geom_bar.py",
"copies": "1",
"size": "2035",
"license": "bsd-2-clause",
"hash": -3544330590642899000,
"line_mean": 33.4915254237,
"line_max": 86,
"alpha_frac": 0.5228501229,
"autogenerated": false,
"ratio": 3.99803536345776,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003896356906292616,
"num_lines": 59
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import matplotlib.pyplot as plt
import sys
from .geom import geom
class geom_histogram(geom):
VALID_AES = ['x', 'color', 'alpha', 'label', 'binwidth']
def __init__(self, *args, **kwargs):
super(geom_histogram, self).__init__(*args, **kwargs)
self._warning_printed = False
def plot_layer(self, layer):
layer = dict((k, v) for k, v in layer.items() if k in self.VALID_AES)
layer.update(self.manual_aes)
if 'binwidth' in layer:
binwidth = layer.pop('binwidth')
try:
binwidth = float(binwidth)
bottom = plt.np.nanmin(layer['x'])
top = plt.np.nanmax(layer['x'])
layer['bins'] = plt.np.arange(bottom, top + binwidth, binwidth)
except:
pass
if 'bins' not in layer:
layer['bins'] = 30
if not self._warning_printed:
sys.stderr.write("binwidth defaulted to range/30. " +
"Use 'binwidth = x' to adjust this.\n")
self._warning_printed = True
plt.hist(**layer)
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/geoms/geom_histogram.py",
"copies": "1",
"size": "1251",
"license": "bsd-2-clause",
"hash": 5875014641859520000,
"line_mean": 35.7941176471,
"line_max": 79,
"alpha_frac": 0.5243804956,
"autogenerated": false,
"ratio": 3.984076433121019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9960910416509996,
"avg_score": 0.009509302442204505,
"num_lines": 34
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .geom import geom
class geom_text(geom):
DEFAULT_AES = {'alpha': None, 'angle': 0, 'color': 'black', 'family': None,
'fontface': 1, 'hjust': None, 'size': 12, 'vjust': None,
'lineheight': 1.2}
REQUIRED_AES = {'label','x','y'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity', 'parse': False}
_aes_renames = {'angle': 'rotation', 'lineheight': 'linespacing'}
_units = {'alpha', 'color', 'family', 'size'}
def _plot_unit(self, pinfo, ax):
x = pinfo.pop('x')
y = pinfo.pop('y')
label = pinfo.pop('label')
# TODO: Deal with the fontface
# from ggplot2
# 1 = plain, 2 = bold, 3 = italic, 4 = bold italic
# "plain", "bold", "italic", "oblique", and "bold.italic"
pinfo.pop('fontface')
# before taking max and min make sure x is not empty
if len(x) == 0:
return
# plt.text does not resize axes, must do manually
xmax = max(x)
xmin = min(x)
ymax = max(y)
ymin = min(y)
margin = 0.1
xmargin = (xmax - xmin) * margin
ymargin = (ymax - ymin) * margin
xmax = xmax + xmargin
xmin = xmin - xmargin
ymax = ymax + ymargin
ymin = ymin - ymargin
# Take current plotting dimension in account for the case that we
# work on a special dataframe just for this geom!
if not self.data is None: # NOTE: not working??
cxmin, cxmax = ax.get_xlim()
cymin, cymax = ax.get_ylim()
# there is a problem if geom_text is the first plot, as
# then the dimension are 0-1 for all axis :-(
xmax = max(xmax, cxmax)
xmin = min(xmin, cxmin)
ymax = max(ymax, cymax)
ymin = min(ymin, cymin)
# TODO: Fix the defaults for this
# try out 0.5
if pinfo['hjust'] is not None:
x = (np.array(x) + pinfo['hjust']).tolist()
else:
pinfo['horizontalalignment'] = 'center'
if pinfo['vjust'] is not None:
y = (np.array(y) + pinfo['vjust']).tolist()
else:
pinfo['verticalalignment'] = 'center'
del pinfo['hjust']
del pinfo['vjust']
for x_g,y_g,s in zip(x,y,label):
ax.text(x_g,y_g,s,**pinfo)
# TODO: Find out why this isn't working as desired
# resize axes
ax.axis([xmin, xmax, ymin, ymax])
| {
"repo_name": "Cophy08/ggplot",
"path": "ggplot/geoms/geom_text.py",
"copies": "12",
"size": "2616",
"license": "bsd-2-clause",
"hash": -655024807016559400,
"line_mean": 33.88,
"line_max": 81,
"alpha_frac": 0.5244648318,
"autogenerated": false,
"ratio": 3.5737704918032787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0040234046540905954,
"num_lines": 75
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from ._overlap import _compute_overlap
__all__ = ['compute_overlap']
def compute_overlap(ilon, ilat, olon, olat):
"""Compute the overlap between two 'pixels' in spherical coordinates.
Parameters
----------
ilon : np.ndarray with shape (N, 4)
The longitudes (in radians) defining the four corners of the input pixel
ilat : np.ndarray with shape (N, 4)
The latitudes (in radians) defining the four corners of the input pixel
olon : np.ndarray with shape (N, 4)
The longitudes (in radians) defining the four corners of the output pixel
olat : np.ndarray with shape (N, 4)
The latitudes (in radians) defining the four corners of the output pixel
Returns
-------
overlap : np.ndarray of length N
Pixel overlap solid angle in steradians
area_ratio : np.ndarray of length N
TODO
"""
ilon = np.asarray(ilon, dtype=np.float64)
ilat = np.asarray(ilat, dtype=np.float64)
olon = np.asarray(olon, dtype=np.float64)
olat = np.asarray(olat, dtype=np.float64)
return _compute_overlap(ilon, ilat, olon, olat)
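# Usage sketch (illustrative): overlap of one small pixel with itself. Each
# row holds the four corners, in radians, of a pixel; the values returned are
# whatever the compiled _compute_overlap routine produces for this input (the
# overlap solid angle and area ratio described in the docstring above).
def _example_self_overlap():
    lon = np.radians([[0.0, 0.1, 0.1, 0.0]])
    lat = np.radians([[0.0, 0.0, 0.1, 0.1]])
    return compute_overlap(lon, lat, lon, lat)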
| {
"repo_name": "bsipocz/reproject",
"path": "reproject/spherical_intersect/overlap.py",
"copies": "1",
"size": "1255",
"license": "bsd-2-clause",
"hash": -1962850640461981000,
"line_mean": 34.8571428571,
"line_max": 81,
"alpha_frac": 0.6501992032,
"autogenerated": false,
"ratio": 3.702064896755162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48522640999551625,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from pandas.lib import Timestamp
import pandas as pd
import statsmodels.api as sm
from statsmodels.nonparametric.smoothers_lowess import lowess as smlowess
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.stats.outliers_influence import summary_table
import scipy.stats as stats
import datetime
date_types = (
pd.tslib.Timestamp,
pd.DatetimeIndex,
pd.Period,
pd.PeriodIndex,
datetime.datetime,
datetime.time
)
_isdate = lambda x: isinstance(x, date_types)
SPAN = 2/3.
ALPHA = 0.05 # significance level for confidence interval
def _snakify(txt):
txt = txt.strip().lower()
return '_'.join(txt.split())
def _plot_friendly(value):
if not isinstance(value, (np.ndarray, pd.Series)):
value = pd.Series(value)
return value
def lm(x, y, alpha=ALPHA):
"fits an OLS from statsmodels. returns tuple."
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
X = sm.add_constant(x)
fit = sm.OLS(y, X).fit()
prstd, iv_l, iv_u = wls_prediction_std(fit)
_, summary_values, summary_names = summary_table(fit, alpha=alpha)
df = pd.DataFrame(summary_values, columns=map(_snakify, summary_names))
# TODO: indexing w/ data frame is messing everything up
fittedvalues = df['predicted_value'].values
predict_mean_ci_low = df['mean_ci_95%_low'].values
predict_mean_ci_upp = df['mean_ci_95%_upp'].values
predict_ci_low = df['predict_ci_95%_low'].values
predict_ci_upp = df['predict_ci_95%_upp'].values
if x_is_date:
x = [Timestamp.fromordinal(int(i)) for i in x]
return (x, fittedvalues, predict_mean_ci_low, predict_mean_ci_upp)
def lowess(x, y, span=SPAN):
"returns y-values estimated using the lowess function in statsmodels."
"""
for more see
statsmodels.nonparametric.smoothers_lowess.lowess
"""
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
result = smlowess(np.array(y), np.array(x), frac=span)
x = pd.Series(result[::,0])
y = pd.Series(result[::,1])
lower, upper = stats.t.interval(span, len(x), loc=0, scale=2)
std = np.std(y)
y1 = pd.Series(lower * std + y)
y2 = pd.Series(upper * std + y)
if x_is_date:
x = [Timestamp.fromordinal(int(i)) for i in x]
return (x, y, y1, y2)
def mavg(x,y, window):
"compute moving average"
x, y = map(_plot_friendly, [x,y])
x_is_date = _isdate(x.iloc[0])
if x_is_date:
x = np.array([i.toordinal() for i in x])
std_err = pd.rolling_std(y, window)
y = pd.rolling_mean(y, window)
y1 = y - std_err
y2 = y + std_err
if x_is_date:
x = [Timestamp.fromordinal(int(i)) for i in x]
return (x, y, y1, y2)
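# Usage sketch (illustrative) of the shared return convention
# (x, fitted, lower, upper) on toy data; it assumes the older pandas and
# statsmodels versions this module was written against.
def _example_smoothers():
    x = pd.Series(np.arange(30, dtype=float))
    y = x * 2 + np.random.randn(30)
    return lm(x, y), lowess(x, y), mavg(x, y, window=5)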
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/stats/smoothers.py",
"copies": "1",
"size": "2981",
"license": "bsd-2-clause",
"hash": 42093328699704510,
"line_mean": 31.7582417582,
"line_max": 75,
"alpha_frac": 0.6340154311,
"autogenerated": false,
"ratio": 2.9573412698412698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9063391005725319,
"avg_score": 0.005593139043190333,
"num_lines": 91
} |
from __future__ import absolute_import, division, print_function, \
unicode_literals
import numpy as np
from sklearn.preprocessing import LabelEncoder
def binary_ks_curve(y_true, y_probas):
"""This function generates the points necessary to calculate the KS
Statistic curve.
Args:
y_true (array-like, shape (n_samples)): True labels of the data.
y_probas (array-like, shape (n_samples)): Probability predictions of
the positive class.
Returns:
thresholds (numpy.ndarray): An array containing the X-axis values for
plotting the KS Statistic plot.
pct1 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
pct2 (numpy.ndarray): An array containing the Y-axis values for one
curve of the KS Statistic plot.
ks_statistic (float): The KS Statistic, or the maximum vertical
distance between the two curves.
max_distance_at (float): The X-axis value at which the maximum vertical
distance between the two curves is seen.
classes (np.ndarray, shape (2)): An array containing the labels of the
two classes making up `y_true`.
Raises:
ValueError: If `y_true` is not composed of 2 classes. The KS Statistic
is only relevant in binary classification.
"""
y_true, y_probas = np.asarray(y_true), np.asarray(y_probas)
lb = LabelEncoder()
encoded_labels = lb.fit_transform(y_true)
if len(lb.classes_) != 2:
raise ValueError('Cannot calculate KS statistic for data with '
'{} category/ies'.format(len(lb.classes_)))
idx = encoded_labels == 0
data1 = np.sort(y_probas[idx])
data2 = np.sort(y_probas[np.logical_not(idx)])
ctr1, ctr2 = 0, 0
thresholds, pct1, pct2 = [], [], []
while ctr1 < len(data1) or ctr2 < len(data2):
# Check if data1 has no more elements
if ctr1 >= len(data1):
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1
# Check if data2 has no more elements
elif ctr2 >= len(data2):
current = data1[ctr1]
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1
else:
if data1[ctr1] > data2[ctr2]:
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1
elif data1[ctr1] < data2[ctr2]:
current = data1[ctr1]
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1
else:
current = data2[ctr2]
while ctr2 < len(data2) and current == data2[ctr2]:
ctr2 += 1
while ctr1 < len(data1) and current == data1[ctr1]:
ctr1 += 1
thresholds.append(current)
pct1.append(ctr1)
pct2.append(ctr2)
thresholds = np.asarray(thresholds)
pct1 = np.asarray(pct1) / float(len(data1))
pct2 = np.asarray(pct2) / float(len(data2))
if thresholds[0] != 0:
thresholds = np.insert(thresholds, 0, [0.0])
pct1 = np.insert(pct1, 0, [0.0])
pct2 = np.insert(pct2, 0, [0.0])
if thresholds[-1] != 1:
thresholds = np.append(thresholds, [1.0])
pct1 = np.append(pct1, [1.0])
pct2 = np.append(pct2, [1.0])
differences = pct1 - pct2
ks_statistic, max_distance_at = (np.max(differences),
thresholds[np.argmax(differences)])
return thresholds, pct1, pct2, ks_statistic, max_distance_at, lb.classes_
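# A minimal, illustrative sketch of calling binary_ks_curve (not from the
# original test suite): the labels and scores below are synthetic placeholders.
def _example_binary_ks_curve():
    """Print the KS statistic for noisy synthetic binary predictions."""
    rng = np.random.RandomState(0)
    y_true = rng.randint(0, 2, size=200)                  # exactly two classes
    y_probas = np.clip(0.6 * y_true + 0.4 * rng.rand(200), 0.0, 1.0)
    thresholds, pct1, pct2, ks, at_thresh, classes = binary_ks_curve(y_true,
                                                                     y_probas)
    print('classes {} -> KS statistic {:.3f} at threshold {:.3f}'.format(
        classes, ks, at_thresh))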
def validate_labels(known_classes, passed_labels, argument_name):
"""Validates the labels passed into the true_labels or pred_labels
arguments in the plot_confusion_matrix function.
Raises a ValueError exception if any of the passed labels are not in the
set of known classes or if there are duplicate labels. Otherwise returns
None.
Args:
known_classes (array-like):
The classes that are known to appear in the data.
passed_labels (array-like):
The labels that were passed in through the argument.
argument_name (str):
The name of the argument being validated.
Example:
>>> known_classes = ["A", "B", "C"]
>>> passed_labels = ["A", "B"]
>>> validate_labels(known_classes, passed_labels, "true_labels")
"""
known_classes = np.array(known_classes)
passed_labels = np.array(passed_labels)
unique_labels, unique_indexes = np.unique(passed_labels, return_index=True)
if len(passed_labels) != len(unique_labels):
indexes = np.arange(0, len(passed_labels))
duplicate_indexes = indexes[~np.in1d(indexes, unique_indexes)]
duplicate_labels = [str(x) for x in passed_labels[duplicate_indexes]]
msg = "The following duplicate labels were passed into {0}: {1}" \
.format(argument_name, ", ".join(duplicate_labels))
raise ValueError(msg)
passed_labels_absent = ~np.in1d(passed_labels, known_classes)
if np.any(passed_labels_absent):
absent_labels = [str(x) for x in passed_labels[passed_labels_absent]]
msg = ("The following labels "
"were passed into {0}, "
"but were not found in "
"labels: {1}").format(argument_name, ", ".join(absent_labels))
raise ValueError(msg)
return
def cumulative_gain_curve(y_true, y_score, pos_label=None):
"""This function generates the points necessary to plot the Cumulative Gain
Note: This implementation is restricted to the binary classification task.
Args:
y_true (array-like, shape (n_samples)): True labels of the data.
y_score (array-like, shape (n_samples)): Target scores, can either be
probability estimates of the positive class, confidence values, or
non-thresholded measure of decisions (as returned by
decision_function on some classifiers).
pos_label (int or str, default=None): Label considered as positive and
others are considered negative
Returns:
percentages (numpy.ndarray): An array containing the X-axis values for
plotting the Cumulative Gains chart.
gains (numpy.ndarray): An array containing the Y-axis values for one
curve of the Cumulative Gains chart.
Raises:
ValueError: If `y_true` is not composed of 2 classes. The Cumulative
Gain Chart is only relevant in binary classification.
"""
y_true, y_score = np.asarray(y_true), np.asarray(y_score)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.array_equal(classes, [0, 1]) or
np.array_equal(classes, [-1, 1]) or
np.array_equal(classes, [0]) or
np.array_equal(classes, [-1]) or
np.array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
sorted_indices = np.argsort(y_score)[::-1]
y_true = y_true[sorted_indices]
gains = np.cumsum(y_true)
percentages = np.arange(start=1, stop=len(y_true) + 1)
gains = gains / float(np.sum(y_true))
percentages = percentages / float(len(y_true))
gains = np.insert(gains, 0, [0])
percentages = np.insert(percentages, 0, [0])
return percentages, gains
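# A minimal, illustrative sketch of calling cumulative_gain_curve on synthetic
# scores; the numbers are placeholders meant only to show the calling
# convention and the shape of the returned arrays.
def _example_cumulative_gain_curve():
    """Report the gain captured by the top-scored 20% of toy samples."""
    rng = np.random.RandomState(1)
    y_true = rng.randint(0, 2, size=500)                # labels in {0, 1}
    y_score = 0.5 * y_true + 0.5 * rng.rand(500)        # imperfect scores
    percentages, gains = cumulative_gain_curve(y_true, y_score)
    idx = np.searchsorted(percentages, 0.2)
    print('Top ~20% of samples capture {:.1%} of positives'.format(gains[idx]))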
| {
"repo_name": "reiinakano/scikit-plot",
"path": "scikitplot/helpers.py",
"copies": "1",
"size": "7732",
"license": "mit",
"hash": -1560035604005248500,
"line_mean": 35.3004694836,
"line_max": 79,
"alpha_frac": 0.6060527677,
"autogenerated": false,
"ratio": 3.738878143133462,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4844930910833462,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import math
def drange(start, stop, step):
"""Compute the steps in between start and stop
Only steps which are a multiple of `step` are used.
"""
r = ((start // step) * step) + step # the first step higher than start
# all subsequent steps are multiple of "step"!
while r < stop:
yield r
r += step
def convert_if_int(x):
if int(x)==x:
return int(x)
else:
return x
def convertable_to_int(x):
if int(x)==x:
return True
else:
return False
def calc_axis_breaks_and_limits(minval, maxval, nlabs=None):
"""Calculates axis breaks and suggested limits.
    The limits are computed as minval/maxval -/+ one third of the tick step.
Parameters
----------
minval : number
lowest value on this axis
maxval : number
        highest value on this axis
nlabs : int
number of labels which should be displayed on the axis
Default: None
"""
if nlabs is None:
diff = maxval - minval
base10 = math.log10(diff)
power = math.floor(base10)
base_unit = 10**power
step = base_unit / 2
else:
diff = maxval - minval
tick_range = diff / float(nlabs)
# make the tick range nice looking...
power = math.ceil(math.log(tick_range, 10))
step = np.round(tick_range / (10**power), 1) * 10**power
labs = list(drange(minval-(step/3), maxval+(step/3), step))
if all([convertable_to_int(lab) for lab in labs]):
labs = [convert_if_int(lab) for lab in labs]
return labs, minval-(step/3), maxval+(step/3)
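# A minimal, illustrative sketch of the calculation above: with the default
# nlabs=None the step is half of the largest power of ten not exceeding the
# data range, so a 0-100 axis gets breaks at 0, 50 and 100 with limits padded
# by one third of a step on either side.
def _example_axis_breaks():
    labs, low, high = calc_axis_breaks_and_limits(0, 100)
    print(labs)         # [0, 50, 100]
    print(low, high)    # approximately -16.67 and 116.67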
| {
"repo_name": "assad2012/ggplot",
"path": "ggplot/scales/utils.py",
"copies": "12",
"size": "1733",
"license": "bsd-2-clause",
"hash": -5200086375520864000,
"line_mean": 26.078125,
"line_max": 74,
"alpha_frac": 0.5937680323,
"autogenerated": false,
"ratio": 3.695095948827292,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0037065282322996207,
"num_lines": 64
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde
from ggplot.utils import make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
# TODO: switch to statsmodels kdes
class stat_density(stat):
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'density', 'position': 'stack',
'kernel': 'gaussian', 'adjust': 1, 'trim': False}
CREATES = {'y'}
def _calculate(self, data):
x = data.pop('x')
try:
float(x.iloc[0])
except:
try:
# try to use it as a pandas.tslib.Timestamp
x = [ts.toordinal() for ts in x]
except:
raise GgplotError("stat_density(): aesthetic x mapping " +
"needs to be convertable to float!")
# TODO: Implement weight
try:
weight = data.pop('weight')
except KeyError:
weight = np.ones(len(x))
# TODO: Get "full" range of densities
# i.e tail off to zero like ggplot2? But there is nothing
# wrong with the current state.
kde = gaussian_kde(x)
bottom = np.min(x)
top = np.max(x)
step = (top - bottom) / 1000.0
x = np.arange(bottom, top, step)
y = kde.evaluate(x)
new_data = pd.DataFrame({'x': x, 'y': y})
# Copy the other aesthetics into the new dataframe
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
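# A minimal, illustrative sketch of how this stat is usually reached through
# the public API (geom_density defaults to stat_density); it assumes the full
# ggplot package is importable and is only meant to be run on demand.
def _example_stat_density():
    from ggplot import ggplot, aes, geom_density
    df = pd.DataFrame({'x': np.random.normal(size=500)})
    print(ggplot(aes(x='x'), data=df) + geom_density())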
| {
"repo_name": "wllmtrng/ggplot",
"path": "ggplot/stats/stat_density.py",
"copies": "12",
"size": "1690",
"license": "bsd-2-clause",
"hash": 2230873071767475000,
"line_mean": 30.8867924528,
"line_max": 74,
"alpha_frac": 0.549112426,
"autogenerated": false,
"ratio": 3.7555555555555555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_bar(geom):
DEFAULT_AES = {'alpha': None, 'color': None, 'fill': '#333333',
'linetype': 'solid', 'size': 1.0, 'weight': None, 'y': None, 'width' : None}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'bin', 'position': 'stack'}
_extra_requires = {'y', 'width'}
_aes_renames = {'linetype': 'linestyle', 'size': 'linewidth',
'fill': 'color', 'color': 'edgecolor'}
# NOTE: Currently, geom_bar does not support mapping
# to alpha and linestyle. TODO: raise exception
_units = {'edgecolor', 'color', 'alpha', 'linestyle', 'linewidth'}
def __init__(self, *args, **kwargs):
# TODO: Change self.__class__ to geom_bar
super(geom_bar, self).__init__(*args, **kwargs)
self.bottom = None
self.ax = None
def _plot_unit(self, pinfo, ax):
categorical = is_categorical(pinfo['x'])
pinfo.pop('weight')
x = pinfo.pop('x')
width_elem = pinfo.pop('width')
# If width is unspecified, default is an array of 1's
        if width_elem is None:
            width = np.ones(len(x))
        else:
            width = np.array(width_elem)
# Make sure bottom is initialized and get heights. If we are working on
# a new plot (using facet_wrap or grid), then reset bottom
        _reset = self.bottom is None or (self.ax is not None and self.ax is not ax)
self.bottom = np.zeros(len(x)) if _reset else self.bottom
self.ax = ax
heights = np.array(pinfo.pop('y'))
# layout and spacing
#
        # matplotlib needs the left of each bin and its width
        # if x has numeric values then:
        #   - left = x - width/2
        # otherwise x is categorical:
        #   - left = cumulative width of previous bins starting
        #     at zero for the first bin
#
# then add a uniform gap between each bin
# - the gap is a fraction of the width of the first bin
# and only applies when x is categorical
_left_gap = 0
_spacing_factor = 0 # of the bin width
if not categorical:
left = np.array([x[i]-width[i]/2 for i in range(len(x))])
else:
_left_gap = 0.2
_spacing_factor = 0.105 # of the bin width
_breaks = np.append([0], width)
left = np.cumsum(_breaks[:-1])
_sep = width[0] * _spacing_factor
left = left + _left_gap + [_sep * i for i in range(len(left))]
ax.bar(left, heights, width, bottom=self.bottom, **pinfo)
ax.autoscale()
if categorical:
ax.set_xticks(left+width/2)
ax.set_xticklabels(x)
# Update bottom positions
self.bottom = heights + self.bottom
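# A small, illustrative reproduction of the categorical layout arithmetic in
# _plot_unit above: three bars of width 1 get left edges separated by the bin
# width plus a uniform gap. The numbers are for demonstration only.
def _example_bar_layout():
    width = np.ones(3)
    _left_gap, _spacing_factor = 0.2, 0.105
    _breaks = np.append([0], width)
    left = np.cumsum(_breaks[:-1])                      # [0, 1, 2]
    _sep = width[0] * _spacing_factor
    left = left + _left_gap + [_sep * i for i in range(len(left))]
    print(left)                                         # [0.2, 1.305, 2.41]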
| {
"repo_name": "benslice/ggplot",
"path": "ggplot/geoms/geom_bar.py",
"copies": "11",
"size": "3061",
"license": "bsd-2-clause",
"hash": 7263265846881768000,
"line_mean": 35.4404761905,
"line_max": 95,
"alpha_frac": 0.5619078732,
"autogenerated": false,
"ratio": 3.7466340269277847,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9808541900127785,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from .geom import geom
from ggplot.utils import is_string
from ggplot.utils import is_categorical
class geom_boxplot(geom):
DEFAULT_AES = {'y': None, 'color': 'black', 'flier_marker': '+'}
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
def __group(self, x, y):
out = {}
for xx, yy in zip(x,y):
if yy not in out: out[yy] = []
out[yy].append(xx)
return out
def _plot_unit(self, pinfo, ax):
x = pinfo.pop('x')
y = pinfo.pop('y')
color = pinfo.pop('color')
fliermarker = pinfo.pop('flier_marker')
if y is not None:
g = self.__group(x,y)
l = sorted(g.keys())
x = [g[k] for k in l]
q = ax.boxplot(x, vert=False)
plt.setp(q['boxes'], color=color)
plt.setp(q['whiskers'], color=color)
plt.setp(q['fliers'], color=color, marker=fliermarker)
if l:
plt.setp(ax, yticklabels=l)
| {
"repo_name": "wllmtrng/ggplot",
"path": "ggplot/geoms/geom_boxplot.py",
"copies": "12",
"size": "1218",
"license": "bsd-2-clause",
"hash": 3821423309244489700,
"line_mean": 28.7073170732,
"line_max": 68,
"alpha_frac": 0.5599343186,
"autogenerated": false,
"ratio": 3.2830188679245285,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9842953186524528,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from ggplot.components import smoothers
from ggplot.utils import make_iterable_ntimes
from .stat import stat
class stat_smooth(stat):
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'smooth', 'position': 'identity', 'method': 'auto',
'se': True, 'n': 80, 'fullrange': False, 'level': 0.95,
'span': 2/3., 'window': None}
CREATES = {'ymin', 'ymax'}
def _calculate(self, data):
# sort data by x and
# convert x and y to lists so that the Series index
# does not mess with the smoothing functions
data = data.sort(['x'])
x = list(data.pop('x'))
y = list(data.pop('y'))
se = self.params['se']
level = self.params['level']
method = self.params['method']
span = self.params['span']
window = self.params['window']
if window is None:
window = int(np.ceil(len(x) / 10.0))
# TODO: fix the smoothers
# - lm : y1, y2 are NaNs
# - mvg: investigate unexpected looking output
if method == "lm":
x, y, y1, y2 = smoothers.lm(x, y, 1-level)
elif method == "ma":
x, y, y1, y2 = smoothers.mavg(x, y, window=window)
else:
x, y, y1, y2 = smoothers.lowess(x, y, span=span)
new_data = pd.DataFrame({'x': x, 'y': y})
if se:
new_data['ymin'] = y1
new_data['ymax'] = y2
# Copy the other aesthetics into the new dataframe
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
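# A minimal, illustrative sketch of requesting this stat through the public
# API; it assumes the full ggplot package is importable. method='ma' selects
# the moving-average smoother with the given window, and se=True draws the
# ymin/ymax ribbon computed above.
def _example_stat_smooth():
    from ggplot import ggplot, aes, geom_point, stat_smooth
    df = pd.DataFrame({'x': np.arange(100),
                       'y': np.arange(100) + 5 * np.random.randn(100)})
    print(ggplot(aes(x='x', y='y'), data=df)
          + geom_point()
          + stat_smooth(method='ma', window=10, se=True))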
| {
"repo_name": "andnovar/ggplot",
"path": "ggplot/stats/stat_smooth.py",
"copies": "12",
"size": "1774",
"license": "bsd-2-clause",
"hash": 2883677740331013600,
"line_mean": 31.8518518519,
"line_max": 81,
"alpha_frac": 0.5394588501,
"autogenerated": false,
"ratio": 3.4115384615384614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9950997311638461,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from ggplot.utils import make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
class stat_function(stat):
"""
Superimpose a function onto a plot
    Uses a user-supplied callable, evaluated over n evenly spaced points
    spanning the range of x.
Parameters
----------
x : list, 1darray
x values of data
fun : function
Function to draw.
n : int
Number of points to interpolate over. Must be greater than zero.
Defaults to 101.
color : str
Color to draw function with.
args : list, dict, object
List or dict of additional arguments to pass to function. If neither
list or dict, object is passed as second argument.
Examples
--------
Sin vs cos.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
gg = ggplot(pd.DataFrame({'x':np.arange(10)}),aes(x='x'))
gg = gg + stat_function(fun=np.sin,color="red")
gg = gg + stat_function(fun=np.cos,color="blue")
print(gg)
Compare random sample density to normal distribution.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.normal(size=100)
# normal distribution function
def dnorm(n):
return (1.0 / np.sqrt(2 * np.pi)) * (np.e ** (-0.5 * (n ** 2)))
data = pd.DataFrame({'x':x})
gg = ggplot(aes(x='x'),data=data) + geom_density()
gg = gg + stat_function(fun=dnorm,n=150)
print(gg)
Passing additional arguments to function as list.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.randn(100)
to_the_power_of = lambda n, p: n ** p
y = x ** 3
y += np.random.randn(100) # add noise
data = pd.DataFrame({'x':x,'y':y})
gg = ggplot(aes(x='x',y='y'),data=data) + geom_point()
gg = gg + stat_function(fun=to_the_power_of,args=[3])
print(gg)
Passing additional arguments to function as dict.
.. plot::
:include-source:
import scipy
import numpy as np
import pandas as pd
from ggplot import *
def dnorm(x, mean, var):
return scipy.stats.norm(mean,var).pdf(x)
data = pd.DataFrame({'x':np.arange(-5,6)})
gg = ggplot(aes(x='x'),data=data)
gg = gg + stat_function(fun=dnorm,color="blue",args={'mean':0.0,'var':0.2})
gg = gg + stat_function(fun=dnorm,color="red",args={'mean':0.0,'var':1.0})
gg = gg + stat_function(fun=dnorm,color="yellow",args={'mean':0.0,'var':5.0})
gg = gg + stat_function(fun=dnorm,color="green",args={'mean':-2.0,'var':0.5})
print(gg)
"""
# TODO: Should not have a required aesthetic, use the scale information
    # maybe that is where the "scale training" helps
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'path', 'position': 'identity', 'fun': None,
'n': 101, 'args': None}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
CREATES = {'y'}
def _calculate(self, data):
x = data.pop('x')
fun = self.params['fun']
n = self.params['n']
args = self.params['args']
if not hasattr(fun, '__call__'):
raise GgplotError("stat_function requires parameter 'fun' to be " +
"a function or any other callable object")
old_fun = fun
if isinstance(args,list):
fun = lambda x: old_fun(x, *args)
elif isinstance(args,dict):
fun = lambda x: old_fun(x, **args)
elif args is not None:
fun = lambda x: old_fun(x, args)
else:
fun = lambda x: old_fun(x)
x = np.linspace(x.min(), x.max(),n)
y = list(map(fun, x))
new_data = pd.DataFrame({'x': x, 'y': y})
# Copy the other aesthetics into the new dataframe
        # Don't copy any previous 'y' assignments
try:
del data['y']
except KeyError:
pass
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
| {
"repo_name": "mizzao/ggplot",
"path": "ggplot/stats/stat_function.py",
"copies": "12",
"size": "4439",
"license": "bsd-2-clause",
"hash": -3369415084848312000,
"line_mean": 28.7919463087,
"line_max": 85,
"alpha_frac": 0.548772246,
"autogenerated": false,
"ratio": 3.5512,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import scipy
import scipy.stats
# BCES fitting
# ===============
def bces(y1,y1err,y2,y2err,cerr):
"""
Does the entire regression calculation for 4 slopes:
OLS(Y|X), OLS(X|Y), bisector, orthogonal.
Fitting form: Y=AX+B.
Usage:
>>> a,b,aerr,berr,covab=bces(x,xerr,y,yerr,cov)
Output:
- a,b : best-fit parameters a,b of the linear regression
- aerr,berr : the standard deviations in a,b
- covab : the covariance between a and b (e.g. for plotting confidence bands)
Arguments:
- x,y : data
- xerr,yerr: measurement errors affecting x and y
- cov : covariance between the measurement errors
(all are arrays)
v1 Mar 2012: ported from bces_regress.f. Added covariance output.
Rodrigo Nemmen
"""
# Arrays holding the code main results for each method:
# Elements: 0-Y|X, 1-X|Y, 2-bisector, 3-orthogonal
a,b,avar,bvar,covarxiz,covar_ba=np.zeros(4),np.zeros(4),np.zeros(4),np.zeros(4),np.zeros(4),np.zeros(4)
# Lists holding the xi and zeta arrays for each method above
xi,zeta=[],[]
# Calculate sigma's for datapoints using length of conf. intervals
sig11var = np.mean( y1err**2 )
sig22var = np.mean( y2err**2 )
sig12var = np.mean( cerr )
# Covariance of Y1 (X) and Y2 (Y)
covar_y1y2 = np.mean( (y1-y1.mean())*(y2-y2.mean()) )
# Compute the regression slopes
a[0] = (covar_y1y2 - sig12var)/(y1.var() - sig11var) # Y|X
a[1] = (y2.var() - sig22var)/(covar_y1y2 - sig12var) # X|Y
a[2] = ( a[0]*a[1] - 1.0 + np.sqrt((1.0 + a[0]**2)*(1.0 + a[1]**2)) ) / (a[0]+a[1]) # bisector
if covar_y1y2<0:
sign = -1.
else:
sign = 1.
a[3] = 0.5*((a[1]-(1./a[0])) + sign*np.sqrt(4.+(a[1]-(1./a[0]))**2)) # orthogonal
# Compute intercepts
for i in range(4):
b[i]=y2.mean()-a[i]*y1.mean()
# Set up variables to calculate standard deviations of slope/intercept
xi.append( ( (y1-y1.mean()) * (y2-a[0]*y1-b[0]) + a[0]*y1err**2 ) / (y1.var()-sig11var) ) # Y|X
xi.append( ( (y2-y2.mean()) * (y2-a[1]*y1-b[1]) - y2err**2 ) / covar_y1y2 ) # X|Y
xi.append( xi[0] * (1.+a[1]**2)*a[2] / ((a[0]+a[1])*np.sqrt((1.+a[0]**2)*(1.+a[1]**2))) + xi[1] * (1.+a[0]**2)*a[2] / ((a[0]+a[1])*np.sqrt((1.+a[0]**2)*(1.+a[1]**2))) ) # bisector
xi.append( xi[0] * a[3]/(a[0]**2*np.sqrt(4.+(a[1]-1./a[0])**2)) + xi[1]*a[3]/np.sqrt(4.+(a[1]-1./a[0])**2) ) # orthogonal
for i in range(4):
zeta.append( y2 - a[i]*y1 - y1.mean()*xi[i] )
for i in range(4):
# Calculate variance for all a and b
avar[i]=xi[i].var()/xi[i].size
bvar[i]=zeta[i].var()/zeta[i].size
# Sample covariance obtained from xi and zeta (paragraph after equation 15 in AB96)
covarxiz[i]=np.mean( (xi[i]-xi[i].mean()) * (zeta[i]-zeta[i].mean()) )
# Covariance between a and b (equation after eq. 15 in AB96)
covar_ab=covarxiz/y1.size
return a,b,np.sqrt(avar),np.sqrt(bvar),covar_ab
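# A minimal, illustrative sketch (not from the original test suite): fit
# synthetic data with a known slope of 2 and intercept of 1 and report the
# Y|X solution. All values below are placeholders.
def _example_bces():
    rng = np.random.RandomState(42)
    n = 200
    xerr = np.full(n, 0.2)
    yerr = np.full(n, 0.5)
    x = rng.uniform(0., 10., n) + rng.normal(0., xerr)
    y = 2. * x + 1. + rng.normal(0., yerr)
    cov = np.zeros(n)                       # uncorrelated measurement errors
    a, b, aerr, berr, covab = bces(x, xerr, y, yerr, cov)
    print('Y|X fit: a = %.2f +- %.2f, b = %.2f +- %.2f'
          % (a[0], aerr[0], b[0], berr[0]))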
def bootstrap(v):
"""
Constructs Monte Carlo simulated data set using the
Bootstrap algorithm.
Usage:
>>> bootstrap(x)
where x is either an array or a list of arrays. If it is a
list, the code returns the corresponding list of bootstrapped
arrays assuming that the same position in these arrays map the
same "physical" object.
"""
if type(v)==list:
vboot=[] # list of boostrapped arrays
n=v[0].size
iran=scipy.random.randint(0,n,n) # Array of random indexes
for x in v: vboot.append(x[iran])
else: # if v is an array, not a list of arrays
n=v.size
iran=scipy.random.randint(0,n,n) # Array of random indexes
vboot=v[iran]
return vboot
def bcesboot(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Does the BCES with bootstrapping.
Usage:
>>> a,b,aerr,berr,covab=bcesboot(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b -- best-fit parameters a,b of the linear regression
:returns: aerr,berr -- the standard deviations in a,b
:returns: covab -- the covariance between a and b (e.g. for plotting confidence bands)
    .. note:: this method is definitely not nearly as fast as bces_regress.f. Needs to be optimized. Maybe adapt the fortran routine using f2py?
"""
import tqdm
print("Bootstrapping progress:")
"""
My convention for storing the results of the bces code below as
matrixes for processing later are as follow:
simulation-method y|x x|y bisector orthogonal
sim0 ...
Am = sim1 ...
sim2 ...
sim3 ...
"""
for i in tqdm.tqdm(range(nsim)):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=np.vstack((am,asim))
bm=np.vstack((bm,bsim))
if True in np.isnan(am):
am,bm=checkNan(am,bm)
# Bootstrapping results
a=np.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=np.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=np.zeros(4),np.zeros(4),np.zeros(4)
for i in range(4):
erra[i]=np.sqrt( 1./(nsim-1) * ( np.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=np.sqrt( 1./(nsim-1) * ( np.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( np.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
return a,b,erra,errb,covab
def checkNan(am,bm):
"""
Sometimes, if the dataset is very small, the regression parameters in
some instances of the bootstrapped sample may have NaNs i.e. failed
    regression (I need to investigate this in more detail).
This method checks to see if there are NaNs in the bootstrapped
fits and remove them from the final sample.
"""
import nmmn.lsd
idel=nmmn.lsd.findnan(am[:,2])
print("Bootstrapping error: regression failed in",np.size(idel),"instances. They were removed.")
return np.delete(am,idel,0),np.delete(bm,idel,0)
# Methods which make use of parallelization
# ===========================================
def ab(x):
"""
This method is the big bottleneck of the parallel BCES code. That's the
reason why I put these calculations in a separate method, in order to
distribute this among the cores. In the original BCES method, this is
inside the main routine.
Argument:
[y1,y1err,y2,y2err,cerr,nsim]
where nsim is the number of bootstrapping trials sent to each core.
:returns: am,bm : the matrixes with slope and intercept where each line corresponds to a bootrap trial and each column maps a different BCES method (ort, y|x etc).
Be very careful and do not use lambda functions when calling this
method and passing it to multiprocessing or ipython.parallel!
I spent >2 hours figuring out why the code was not working until I
realized the reason was the use of lambda functions.
"""
y1,y1err,y2,y2err,cerr,nsim=x[0],x[1],x[2],x[3],x[4],x[5]
for i in range(int(nsim)):
[y1sim,y1errsim,y2sim,y2errsim,cerrsim]=bootstrap([y1,y1err,y2,y2err,cerr])
asim,bsim,errasim,errbsim,covabsim=bces(y1sim,y1errsim,y2sim,y2errsim,cerrsim)
if i==0:
# Initialize the matrixes
am,bm=asim.copy(),bsim.copy()
else:
am=np.vstack((am,asim))
bm=np.vstack((bm,bsim))
return am,bm
def bcesp(y1,y1err,y2,y2err,cerr,nsim=10000):
"""
Parallel implementation of the BCES with bootstrapping.
Divide the bootstraps equally among the threads (cores) of
the machine. It will automatically detect the number of
cores available.
Usage:
>>> a,b,aerr,berr,covab=bcesp(x,xerr,y,yerr,cov,nsim)
:param x,y: data
:param xerr,yerr: measurement errors affecting x and y
:param cov: covariance between the measurement errors (all are arrays)
:param nsim: number of Monte Carlo simulations (bootstraps)
:returns: a,b - best-fit parameters a,b of the linear regression
:returns: aerr,berr - the standard deviations in a,b
:returns: covab - the covariance between a and b (e.g. for plotting confidence bands)
    .. seealso:: Check out ~/work/projects/playground/parallel python/bcesp.py for the original, testing, code. I deleted some lines from there to make the "production" version.
* v1 Mar 2012: serial version ported from bces_regress.f. Added covariance output.
* v2 May 3rd 2012: parallel version ported from nemmen.bcesboot.
.. codeauthor: Rodrigo Nemmen
"""
import time # for benchmarking
import multiprocessing
print("BCES,", nsim,"trials... ")
tic=time.time()
# Find out number of cores available
ncores=multiprocessing.cpu_count()
# We will divide the processing into how many parts?
n=2*ncores
"""
Must create lists that will be distributed among the many
cores with structure
core1 <- [y1,y1err,y2,y2err,cerr,nsim/n]
core2 <- [y1,y1err,y2,y2err,cerr,nsim/n]
etc...
"""
pargs=[] # this is a list of lists!
for i in range(n):
pargs.append([y1,y1err,y2,y2err,cerr,nsim/n])
# Initializes the parallel engine
pool = multiprocessing.Pool(processes=ncores) # multiprocessing package
"""
Each core processes ab(input)
return matrixes Am,Bm with the results of nsim/n
presult[i][0] = Am with nsim/n lines
presult[i][1] = Bm with nsim/n lines
"""
presult=pool.map(ab, pargs) # multiprocessing
pool.close() # close the parallel engine
# vstack the matrixes processed from all cores
i=0
for m in presult:
if i==0:
# Initialize the matrixes
am,bm=m[0].copy(),m[1].copy()
else:
am=np.vstack((am,m[0]))
bm=np.vstack((bm,m[1]))
i=i+1
if True in np.isnan(am):
am,bm=checkNan(am,bm)
# Computes the bootstrapping results on the stacked matrixes
a=np.array([ am[:,0].mean(),am[:,1].mean(),am[:,2].mean(),am[:,3].mean() ])
b=np.array([ bm[:,0].mean(),bm[:,1].mean(),bm[:,2].mean(),bm[:,3].mean() ])
# Error from unbiased sample variances
erra,errb,covab=np.zeros(4),np.zeros(4),np.zeros(4)
for i in range(4):
erra[i]=np.sqrt( 1./(nsim-1) * ( np.sum(am[:,i]**2)-nsim*(am[:,i].mean())**2 ))
errb[i]=np.sqrt( 1./(nsim-1) * ( np.sum(bm[:,i]**2)-nsim*(bm[:,i].mean())**2 ))
covab[i]=1./(nsim-1) * ( np.sum(am[:,i]*bm[:,i])-nsim*am[:,i].mean()*bm[:,i].mean() )
print("%f s" % (time.time() - tic))
return a,b,erra,errb,covab
| {
"repo_name": "rsnemmen/BCES",
"path": "bces/bces.py",
"copies": "1",
"size": "10452",
"license": "mit",
"hash": -8815893280019946000,
"line_mean": 29.2080924855,
"line_max": 180,
"alpha_frac": 0.6557596632,
"autogenerated": false,
"ratio": 2.588410104011887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.797117966743486,
"avg_score": 0.1545980199554056,
"num_lines": 346
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import six
SHAPES = [
'o',#circle
'^',#triangle up
'D',#diamond
'v',#triangle down
'+',#plus
'x',#x
's',#square
'*',#star
'p',#pentagon
    '8'#octagon
]
def shape_gen():
while True:
for shape in SHAPES:
yield shape
def assign_shapes(data, aes, gg):
"""Assigns shapes to the given data based on the aes and adds the right legend
Parameters
----------
data : DataFrame
dataframe which should have shapes assigned to
aes : aesthetic
mapping, including a mapping from shapes to variable
gg : ggplot
object, which holds information and gets a legend assigned
Returns
-------
data : DataFrame
the changed dataframe
"""
if 'shape' in aes:
shape_col = aes['shape']
possible_shapes = np.unique(data[shape_col])
shape = shape_gen()
        # markers in matplotlib are not unicode-ready in 1.3.1 :-( -> use explicit str()...
shape_mapping = dict((value, str(six.next(shape))) for value in possible_shapes)
data['shape_mapping'] = data[shape_col].apply(lambda x: shape_mapping[x])
gg.add_to_legend("marker", dict((v, k) for k, v in shape_mapping.items()))
return data
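# A small, illustrative check of the generator above: it cycles through SHAPES
# indefinitely, so any number of categories keeps receiving markers in order.
def _example_shape_gen():
    gen = shape_gen()
    print([six.next(gen) for _ in range(12)])    # wraps around after 10 shapes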
| {
"repo_name": "eco32i/ggplot",
"path": "ggplot/components/shapes.py",
"copies": "1",
"size": "1379",
"license": "bsd-2-clause",
"hash": 9215212319028130000,
"line_mean": 26.0392156863,
"line_max": 90,
"alpha_frac": 0.5910079768,
"autogenerated": false,
"ratio": 3.757493188010899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4848501164810899,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import functools
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.8f')
from glob import glob
import numpy as np
from astroquery.simbad import Simbad
from astropy.table import Table
from astropy.io import ascii
from astropy.time import Time
from .catalog import query_catalog_for_object
from .activity import Measurement, SIndex, StarProps
__all__ = ['glob_spectra_paths', 'stars_to_json', 'json_to_stars',
'parse_hires']
results_dir = '/astro/users/bmmorris/Dropbox/Apps/ShareLaTeX/CaII_HAT-P-11/results/'
def glob_spectra_paths(data_dir, target_names):
"""
Collect paths to spectrum FITS files.
Parameters
----------
data_dir : str or list
Paths to the directories containing spectrum FITS files
target_names : list
String patterns that match the beginning of files with targets to
collect.
Returns
-------
spectra_paths : list
List of paths to spectrum FITS files
"""
if type(data_dir) != list:
data_dir = [data_dir]
all_spectra_paths = []
for d_dir in data_dir:
# Collect files for each target:
spectra_paths_lists = [glob(os.path.join(d_dir,
'{0}*.wfrmcpc.fits'.format(name)))
for name in target_names]
# Reduce to one list:
spectra_paths = functools.reduce(list.__add__, spectra_paths_lists)
all_spectra_paths.extend(spectra_paths)
return all_spectra_paths
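# A minimal, illustrative call; the directories and target names below are
# placeholders rather than paths actually used by this project.
def _example_glob_spectra_paths():
    return glob_spectra_paths(data_dir=['/data/apo/2016', '/data/apo/2017'],
                              target_names=['HAT-P-11', 'HD222107'])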
def construct_standard_star_table(star_list, write_to=results_dir):
names = []
sp_types = []
s_mwo = []
sigma_mwo = []
for star in star_list:
names.append(star.upper())
customSimbad = Simbad()
customSimbad.add_votable_fields('sptype')
sp_type = customSimbad.query_object(star)['SP_TYPE'][0]
sp_types.append(sp_type)
star_mwo_tbl = query_catalog_for_object(star)
s_mwo.append(star_mwo_tbl['Smean'])
sigma_mwo.append(star_mwo_tbl['e_Smean'])
standard_table = Table([names, sp_types, s_mwo, sigma_mwo],
names=['Star', 'Sp.~Type', '$S_{MWO}$', '$\sigma_{MWO}$'])
latexdict = dict(col_align='l l c c', preamble=r'\begin{center}',
tablefoot=r'\end{center}',
caption=r'Stars observed to calibrate the $S$-index '
r'(see Section~\ref{sec:def_s_index}). \label{tab:cals}',
data_start=r'\hline')
output_path = os.path.join(results_dir, 'cal_stars.tex')
# output_path,
ascii.write(standard_table, format='latex', latexdict=latexdict)
def floats_to_strings(d):
dictionary = d.copy()
for key in dictionary:
dictionary[key] = str(dictionary[key])
return dictionary
def stars_to_json(star_list, output_path='star_data.json'):
"""
Save list of stellar properties to a JSON file.
Parameters
----------
star_list : list of `StarProps`
Star properties to save to json
output_path : str
File path to output
"""
stars_attrs = star_list[0].__dict__.keys()
all_data = dict()
for star in star_list:
star_data = dict()
for attr in stars_attrs:
value = getattr(star, attr)
if isinstance(value, Measurement):
value = floats_to_strings(value.__dict__)
elif isinstance(value, SIndex):
value = value.to_dict()
else:
value = str(value)
star_data[attr] = value
all_data[star.name + '; ' + str(star.time.datetime)] = star_data
with open(output_path, 'w') as w:
json.dump(all_data, w, indent=4, sort_keys=True)
def json_to_stars(json_path):
"""
Loads JSON archive into list of `StarProps` objects.
Parameters
----------
json_path : str
Path to saved stellar properties
Returns
-------
stars : list of `StarProps`
List of stellar properties.
"""
with open(json_path, 'r') as w:
dictionary = json.load(w)
stars = [StarProps.from_dict(dictionary[star]) for star in dictionary]
return stars
def parse_hires(path):
text_file = open(path, 'r').read().splitlines()
header_line = text_file[0].split()
data = {header: [] for header in header_line}
for line in text_file[1:]:
split_line = line.split()
for i, header in enumerate(header_line):
if header in ['Signal/Noise', 'ModJD', 'S-value']:
data[header].append(float(split_line[i]))
else:
j = 1 if len(split_line) > len(header_line) else 0
data[header].append(split_line[i+j] + split_line[i])
table = Table(data)
floats = np.array(table['ModJD'].data) + 2440000.0 #+ 4e4 - 0.5
table['time'] = Time(floats, format='jd')
return table
| {
"repo_name": "bmorris3/boyajian_star_arces",
"path": "toolkit/utils.py",
"copies": "1",
"size": "5065",
"license": "mit",
"hash": -3571135596646832600,
"line_mean": 26.8296703297,
"line_max": 86,
"alpha_frac": 0.5855873643,
"autogenerated": false,
"ratio": 3.5394828791055204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.462507024340552,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import hashlib
from datetime import timedelta
from cryptacular.bcrypt import BCRYPTPasswordManager
from sqlalchemy import Column, types
from . import utils
from .base import Base
from .image import ImageMixin
from .user_mixin import UserMixin
__all__ = ['User']
class User(Base, ImageMixin, UserMixin):
__tablename__ = 'users'
__table_args__ = {'mysql_engine': 'InnoDB'}
id = Column(types.Integer, primary_key=True)
name = Column(types.Unicode(255), nullable=False)
email = Column(types.String(255), nullable=False, unique=True)
hashed_password = Column(types.String(60), nullable=True)
password_reset_token = Column(types.String(64), nullable=False, default='')
password_reset_time = Column(types.DateTime, nullable=False,
default=utils.utcnow)
enabled = Column(types.Boolean, nullable=False, default=True)
url_path = Column(types.String(255), nullable=True, unique=True)
location = Column(types.Unicode(255), nullable=False, default=u'')
@staticmethod
def hash_password(password):
"""
Hash a password to store it in the database or verify against a
database.
The default bcrypt work factor is 12, but that makes logins a bit
slow, so we use 11.
:param password:
Plaintext password, as a unicode string.
:return:
Bcrypt-hashed password. If provided password is ``None``, returns
``None``.
"""
if password is None:
return None
else:
assert len(password) < 255, \
"passwords > 255 characters not allowed"
manager = BCRYPTPasswordManager()
return manager.encode(password, rounds=11)
@staticmethod
def generate_token():
"""
Generate a password reset token.
:return:
Return a nonce to be used in URLs for validating password resets.
"""
s = os.urandom(256) + str(id({})).encode('utf8')
return hashlib.sha256(s).hexdigest()
def set_reset_password_token(self):
"""
Generate a password reset token, set it, and return it. If there is an
existing reset token that was generated in the last 60 seconds, don't
generate a new one, and just return the existing one.
:return:
Nonce as created by generate_token().
"""
utcnow = utils.utcnow()
# Check to make sure the password token wasn't just generated: if it
# was, return the same one. If it doesn't exist, force generation.
if (not self.password_reset_token or
self.password_reset_time < utcnow - timedelta(hours=6)):
self.password_reset_time = utcnow
self.password_reset_token = User.generate_token()
return self.password_reset_token
def clear_reset_password_token(self):
"""
Clear any previously set password reset token.
"""
self.password_reset_token = ''
self.password_reset_time = utils.utcnow()
def update_password(self, password):
"""
Given a new plaintext password, hash it and update the password field.
:param password:
Plaintext password, as a unicode string.
"""
self.hashed_password = User.hash_password(password)
def check_password(self, password):
"""
Check a plaintext password against our hashed password.
:param password:
Plaintext password, as a unicode string.
:return:
True if the password is correct, False otherwise.
"""
assert len(password) < 255, "passwords > 255 characters not allowed"
hsh = self.hashed_password
manager = BCRYPTPasswordManager()
return hsh and manager.check(hsh, password)
def has_permission(self, permission_name):
# XXX Implement this, obviously.
return True
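# A minimal, illustrative sketch of the password lifecycle on this model; the
# attribute values are invented, session handling is omitted, and it assumes
# the default declarative constructor accepts these column keyword arguments.
def _example_password_flow():
    user = User(name=u'Example', email='user@example.com')
    user.update_password(u'correct horse battery staple')
    assert user.check_password(u'correct horse battery staple')
    assert not user.check_password(u'wrong password')
    token = user.set_reset_password_token()     # nonce to email to the user
    user.clear_reset_password_token()
    return token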
| {
"repo_name": "storborg/warpworks",
"path": "warpworks/model/user.py",
"copies": "1",
"size": "4070",
"license": "mit",
"hash": -2779442904341444600,
"line_mean": 32.6363636364,
"line_max": 79,
"alpha_frac": 0.6253071253,
"autogenerated": false,
"ratio": 4.348290598290598,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5473597723590597,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
import subprocess
import sys
import tempfile
from distutils.core import Command
from .compat import _fix_user_options
PY3 = sys.version_info[0] == 3
class AstropyTest(Command, object):
description = 'Run the tests for this package'
user_options = [
('package=', 'P',
"The name of a specific package to test, e.g. 'io.fits' or 'utils'. "
"If nothing is specified, all default tests are run."),
('test-path=', 't',
'Specify a test location by path. If a relative path to a '
'.py file, it is relative to the built package. If a relative '
'path to a .rst file, it is relative to the docs directory '
'(see --docs-path). May also be an absolute path.'),
('verbose-results', 'V',
'Turn on verbose output from pytest.'),
('plugins=', 'p',
'Plugins to enable when running pytest.'),
('pastebin=', 'b',
"Enable pytest pastebin output. Either 'all' or 'failed'."),
('args=', 'a',
'Additional arguments to be passed to pytest.'),
('remote-data', 'R', 'Run tests that download remote data.'),
('pep8', '8',
'Enable PEP8 checking and disable regular tests. '
'Requires the pytest-pep8 plugin.'),
('pdb', 'd',
'Start the interactive Python debugger on errors.'),
('coverage', 'c',
'Create a coverage report. Requires the coverage package.'),
('open-files', 'o', 'Fail if any tests leave files open.'),
('parallel=', 'j',
'Run the tests in parallel on the specified number of '
'CPUs. If negative, all the cores on the machine will be '
'used. Requires the pytest-xdist plugin.'),
('docs-path=', None,
'The path to the documentation .rst files. If not provided, and '
'the current directory contains a directory called "docs", that '
'will be used.'),
('skip-docs', None,
"Don't test the documentation .rst files.")
]
user_options = _fix_user_options(user_options)
package_name = ''
def initialize_options(self):
self.package = None
self.test_path = None
self.verbose_results = False
self.plugins = None
self.pastebin = None
self.args = None
self.remote_data = False
self.pep8 = False
self.pdb = False
self.coverage = False
self.open_files = False
self.parallel = 0
self.docs_path = None
self.skip_docs = False
def finalize_options(self):
# Normally we would validate the options here, but that's handled in
# run_tests
pass
def run(self):
try:
import astropy
except ImportError:
raise ImportError(
"The 'test' command requires the astropy package to be "
"installed and importable.")
self.reinitialize_command('build', inplace=False)
self.run_command('build')
build_cmd = self.get_finalized_command('build')
new_path = os.path.abspath(build_cmd.build_lib)
if self.docs_path is None:
if os.path.exists('docs'):
self.docs_path = os.path.abspath('docs')
# Copy the build to a temporary directory for the purposes of testing
# - this avoids creating pyc and __pycache__ directories inside the
# build directory
tmp_dir = tempfile.mkdtemp(prefix=self.package_name + '-test-')
testing_path = os.path.join(tmp_dir, os.path.basename(new_path))
shutil.copytree(new_path, testing_path)
shutil.copy('setup.cfg', testing_path)
cmd_pre = ''
cmd_post = ''
try:
if self.coverage:
if self.parallel != 0:
raise ValueError(
"--coverage can not be used with --parallel")
try:
import coverage
except ImportError:
raise ImportError(
"--coverage requires that the coverage package is "
"installed.")
# Don't use get_pkg_data_filename here, because it
# requires importing astropy.config and thus screwing
# up coverage results for those packages.
coveragerc = os.path.join(
testing_path, self.package_name, 'tests', 'coveragerc')
# We create a coveragerc that is specific to the version
# of Python we're running, so that we can mark branches
# as being specifically for Python 2 or Python 3
with open(coveragerc, 'r') as fd:
coveragerc_content = fd.read()
if PY3:
ignore_python_version = '2'
else:
ignore_python_version = '3'
coveragerc_content = coveragerc_content.replace(
"{ignore_python_version}", ignore_python_version).replace(
"{packagename}", self.package_name)
tmp_coveragerc = os.path.join(tmp_dir, 'coveragerc')
with open(tmp_coveragerc, 'wb') as tmp:
tmp.write(coveragerc_content.encode('utf-8'))
cmd_pre = (
'import coverage; '
'cov = coverage.coverage(data_file="{0}", config_file="{1}"); '
'cov.start();'.format(
os.path.abspath(".coverage"), tmp_coveragerc))
cmd_post = (
'cov.stop(); '
'from astropy.tests.helper import _save_coverage; '
'_save_coverage(cov, result, "{0}", "{1}");'.format(
os.path.abspath('.'), testing_path))
if PY3:
set_flag = "import builtins; builtins._ASTROPY_TEST_ = True"
else:
set_flag = "import __builtin__; __builtin__._ASTROPY_TEST_ = True"
cmd = ('{cmd_pre}{0}; import {1.package_name}, sys; result = ('
'{1.package_name}.test('
'package={1.package!r}, '
'test_path={1.test_path!r}, '
'args={1.args!r}, '
'plugins={1.plugins!r}, '
'verbose={1.verbose_results!r}, '
'pastebin={1.pastebin!r}, '
'remote_data={1.remote_data!r}, '
'pep8={1.pep8!r}, '
'pdb={1.pdb!r}, '
'open_files={1.open_files!r}, '
'parallel={1.parallel!r}, '
'docs_path={1.docs_path!r}, '
'skip_docs={1.skip_docs!r})); '
'{cmd_post}'
'sys.exit(result)')
cmd = cmd.format(set_flag, self, cmd_pre=cmd_pre, cmd_post=cmd_post)
# Run the tests in a subprocess--this is necessary since
# new extension modules may have appeared, and this is the
# easiest way to set up a new environment
# Remove temporary directory
# On Python 3.x prior to 3.3, the creation of .pyc files
# is not atomic. py.test jumps through some hoops to make
# this work by parsing import statements and carefully
# importing files atomically. However, it can't detect
# when __import__ is used, so its carefulness still fails.
# The solution here (admittedly a bit of a hack), is to
# turn off the generation of .pyc files altogether by
# passing the `-B` switch to `python`. This does mean
# that each core will have to compile .py file to bytecode
# itself, rather than getting lucky and borrowing the work
# already done by another core. Compilation is an
# insignificant fraction of total testing time, though, so
# it's probably not worth worrying about.
retcode = subprocess.call([sys.executable, '-B', '-c', cmd],
cwd=testing_path, close_fds=False)
finally:
shutil.rmtree(tmp_dir)
raise SystemExit(retcode)
| {
"repo_name": "eteq/astropy-helpers",
"path": "astropy_helpers/test_helpers.py",
"copies": "1",
"size": "8430",
"license": "bsd-3-clause",
"hash": 57216073260580950,
"line_mean": 40.3235294118,
"line_max": 83,
"alpha_frac": 0.534282325,
"autogenerated": false,
"ratio": 4.329738058551618,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5364020383551618,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import sys
import logging
from astropy.io.fits.verify import VerifyError
from ccdproc import ImageFileCollection
from ..core import fix_keywords, identify_technique
class DataClassifier(object):
"""Classifies the data being presented to the pipeline.
Data classifier is intended to define the camera that is being used and the
technique in use. This will be used later to make important decisions
regarding the process to be used.
"""
def __init__(self):
"""Initialization method for the DataClassifier class
The general arguments of the program are parsed and become part of the
class attributes. The rest of attributes are initialized as None.
"""
self.log = logging.getLogger(__name__)
self.raw_path = None
self.nights_dict = None
self.instrument = None
self.image_collection = None
self.objects_collection = None
self.technique = None
def __repr__(self):
"""String representation of the information contained."""
return str("Raw Path: {:s}\n"
"Instrument: {:s} Camera\n"
"Observing Technique: {:s}".format(self.raw_path,
self.instrument,
self.technique))
def __call__(self, raw_path):
"""Call method for the DataClassifier class
This method call specific method that define all the attributes of the
class. The intention is to define the instrument and technique in use.
Args:
raw_path (str): Full Path to raw data
"""
self.raw_path = raw_path
# define the ImageFileCollection instance right away.
try:
ifc = ImageFileCollection(self.raw_path)
except VerifyError as error: # pragma: no cover
self.log.error("Raised VerifyError: {:}".format(error))
self.log.critical("Some keywords are not FITS compliant. Trying "
"to fix the headers.")
fix_keywords(path=self.raw_path)
self.log.info("Headers have been fixed, please rerun the pipeline!")
sys.exit()
self.image_collection = ifc.summary.to_pandas()
self.objects_collection = self.image_collection[
self.image_collection.obstype != 'BIAS']
self.nights_dict = {}
self.log.debug('Raw path: {:s}'.format(self.raw_path))
self._get_instrument()
if self.instrument is not None:
self.log.info('Instrument: {:s} Camera'.format(self.instrument))
else:
self.log.critical("Unable to determine which camera was used.")
self.log.info("Make sure you only have 'Blue' or 'Red' camera data "
"only, not both.")
sys.exit()
self._get_obs_technique()
if self.technique is not None:
self.log.info('Observing Technique: {:s}'.format(self.technique))
# else:
# self.log.critical("Unable to determine observing technique used.")
# sys.exit()
if self.instrument is not None and self.technique is not None:
# folder name is used as key for the dictionary
night = os.path.basename(self.raw_path)
self.nights_dict[night] = {'full_path': self.raw_path,
'instrument': self.instrument,
'technique': self.technique}
else:
self.log.error('Failed to determine Instrument or Technique '
'for the night: {:s}'.format(self.raw_path))
def _get_instrument(self):
"""Identify Goodman's Camera
The header keyword of the camera is `INSTCONF`.
Notes:
This methods no longer offers backwards compatibility.
"""
instconf = self.objects_collection.instconf.unique()
if len(instconf) > 1:
for _inst in instconf:
self.log.debug("INSTCONF = {:s} is present.".format(_inst))
self.log.warning("Camera changes are forbidden during the night")
elif len(instconf) == 1:
self.instrument = instconf[0]
self.log.debug("Detected {:s} camera.".format(self.instrument))
# else:
# self.log.error("Impossible to determine which camera was used.")
def _get_obs_technique(self):
"""Identify if the data is Imaging or Spectroscopy
For imaging data the keyword `WAVMODE` is `Imaging` therefore the logic
here is: If there is only one value for `WAVMODE` and it is `Imaging`
then the technique is `Imaging`. If `Imaging` is in the result along
        with others, then it will assume the technique is Spectroscopy and will
ignore all the Imaging data. If none of the conditions above are met it
will assume the technique is Spectroscopy.
The result is stored as an attribute of the class.
"""
# self.technique = identify_technique()
wavmodes = [str(w).upper() for w in self.objects_collection.wavmode.unique()]
if len(wavmodes) == 1 and wavmodes[0] == 'IMAGING':
self.technique = 'Imaging'
elif 'IMAGING' in wavmodes and len(wavmodes) > 1:
self.log.error('There seems to be Imaging and Spectroscopic '
'data. I will assume the Imaging data are '
'acquisition images therefore they will be '
'ignored.')
self.log.info("If you really have Imaging data, please process "
"them in a separated folder.")
self.technique = 'Spectroscopy'
else:
self.technique = 'Spectroscopy'
# inform the results, no need to return
self.log.info('Detected {:s} Data from {:s} '
'Camera'.format(self.technique, self.instrument))
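# A minimal, illustrative sketch of classifying a single night of raw data;
# the path below is a placeholder, not a real dataset location.
def _example_classify_night():
    classifier = DataClassifier()
    classifier(raw_path='/data/goodman/raw/2020-01-01')
    print(classifier)
    return classifier.nights_dict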
if __name__ == '__main__':
pass
| {
"repo_name": "soar-telescope/goodman",
"path": "goodman_pipeline/images/data_classifier.py",
"copies": "1",
"size": "6230",
"license": "bsd-3-clause",
"hash": 8230829663941967000,
"line_mean": 36.987804878,
"line_max": 85,
"alpha_frac": 0.5817014446,
"autogenerated": false,
"ratio": 4.33240611961057,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.541410756421057,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import time
import sys
import desitarget.io
import astropy.io.fits as fits
import numpy as np
from astropy.table import Table
############################################################
def sweep_mock_roots(input_yaml,sweep_root_dir='./output/sweep'):
"""
Returns a dict, keys are the input.yaml names of each source class and
values are the path to the sweeps for the corresponding mocks. Use to get
the input paths for the other sweep io routines.
Args:
input_yaml : path to the survey configuration yaml
sweep_root_dir : top of the sweep directory tree
"""
import yaml
with open(input_yaml,'r') as f:
param = yaml.load(f)
roots = list()
for source,v in param['sources'].items():
# Don't use os.path.join because of behaviour on leading / for
# v['root_mock_dir'].
sweep_subroot = os.path.normpath(sweep_root_dir +
os.path.sep +
v['root_mock_dir'])
roots.append((source,sweep_subroot))
return dict(roots)
############################################################
def prepare_sweep_data(sweep_mock_root_dir,data=None,epoch=0,filetype='observed'):
"""
Reads (if necessary) and combines all the sweep data under a given root.
Arguments:
sweep_mock_root_dir: this should be one of the entries in the dict
returned by sweep_mock_roots().
data: either None (default), a path to a specific .fits.gz file from
which the combined data can be read, or a numpy array/astropy Table
containting the combined data (which is not re-read).
"""
if data is None:
# Load the data if not passed directly
data = load_all_epoch(sweep_mock_root_dir,epoch=epoch,filetype=filetype)
elif isinstance(data,str):
# Data is a path to read the data from
print('Got data path: {}'.format(data))
        # Only accept gzipped FITS as a likely correct path. Note that
        # os.path.splitext() would only return '.gz', so test the full suffix.
        if not data.endswith('.fits.gz'):
raise Exception('Data path does not have .fits.gz extension!')
        # If the cached file exists, read it; otherwise raise an error (the
        # fallback of re-reading all epochs and writing them out to this path
        # is not implemented here).
if os.path.exists(data):
print('Reading cached data!')
data = Table.read(data)
else:
raise Exception('Cannot read data from {}, no such path'.format(data))
elif isinstance(data,Table):
# The data was passed in and was an astropy table, pass it back out
# again with no change.
print('Using existing table with {:d} rows'.format(len(data)))
pass
else:
# The data was passed in but was not an astropy table. Pass it back out
# again.
data = np.array(data)
print('Using existing table with {:d} rows'.format(len(data)))
return data
############################################################
def load_all_epoch(sweep_mock_root_dir,epoch=0,filetype='observed'):
"""
Iterates over the sweep files under a given root and reads them into
memory. This is a lower-level routine called by prepare_sweep_data().
As written this will only work if passed a sweep *sub*-root path (i.e. the
    node above one particular type of mock) rather than the base sweep root
(output/sweep).
"""
print('Loading data for epoch {:d} under {}'.format(epoch,sweep_mock_root_dir))
# Walk directories
iter_sweep_files = desitarget.io.iter_files(sweep_mock_root_dir, '',
ext="{}.fits".format(filetype))
t0 = time.time()
data = list()
for fpath in list(iter_sweep_files):
fpath_epoch = int(os.path.split(os.path.split(fpath)[0])[-1])
if fpath_epoch == epoch:
data.append(fits.getdata(fpath))
nfiles = len(data)
if nfiles == 0:
__fname__ = sys._getframe().f_code.co_name
raise Exception('{}({},{},{}) read zero files!'.format(__fname__,
sweep_mock_root_dir,
epoch,
filetype))
data = np.concatenate(data)
t1 = time.time()
print('Read {:d} rows from {:d} files in {:f}s'.format(len(data),nfiles,t1-t0))
return data
############################################################
def combine_sweep_files(source_name,input_yaml,sweep_root_dir,
output_dir=None,
data=None,epoch=0,filetype='observed'):
"""
Concatenates all the sweep files for a given target class.
"""
roots = sweep_mock_roots(input_yaml,sweep_root_dir=sweep_root_dir)
if not source_name in roots.keys():
raise Exception("No source class {} in config {}".format(source_name,input_yaml))
sweep_mock_root_dir = roots[source_name]
t = prepare_sweep_data(sweep_mock_root_dir,data=data,epoch=epoch,filetype=filetype)
if output_dir is None:
output_dir = os.path.join(sweep_root_dir,'combined',source_name)
else:
output_dir = os.path.join(output_dir,source_name)
if not os.path.exists(output_dir): os.makedirs(output_dir)
output_name = '{}_{}_epoch{:d}.fits.gz'.format(source_name,filetype,epoch)
output_path = os.path.join(output_dir,output_name)
Table(t).write(output_path,overwrite=True)
print('Wrote combined sweep file to {}'.format(output_path))
return
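# A minimal, illustrative sketch tying the helpers above together; the yaml
# path, sweep root and source class name are placeholders for this example.
def _example_combine_sweeps():
    roots = sweep_mock_roots('input.yaml', sweep_root_dir='./output/sweep')
    print('Source classes with sweeps:', sorted(roots.keys()))
    combine_sweep_files('MWS_MAIN', 'input.yaml', './output/sweep',
                        epoch=0, filetype='observed')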
| {
"repo_name": "apcooper/bright_analysis",
"path": "py/bright_analysis/sweeps/io.py",
"copies": "1",
"size": "5761",
"license": "bsd-3-clause",
"hash": -8862720921649169000,
"line_mean": 38.4589041096,
"line_max": 89,
"alpha_frac": 0.5768095817,
"autogenerated": false,
"ratio": 4.03149055283415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5108300134534149,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import time
import sys
import healpy as hp
import desiutil.plots as desiplot
import desitarget.io
import astropy.io.fits as fits
import numpy as np
import matplotlib.pyplot as pl
from astropy.table import Table
from matplotlib import rcParams
rcParams['font.family'] = 'monospace'
from bright_analysis.sweeps.io import prepare_sweep_data
############################################################
def hide_end_ticklabels(ax):
pl.setp(ax.get_xticklabels()[0],visible=False)
pl.setp(ax.get_yticklabels()[0],visible=False)
pl.setp(ax.get_xticklabels()[-1],visible=False)
pl.setp(ax.get_yticklabels()[-1],visible=False)
pl.draw()
return
############################################################
def plot_epoch_distance_ratio(sweep_root_dir=None,
data_obs=None,data_uno=None,
epoch=0,
ymax=0.0,
group_disc=False,
split_pop=True,
savepath=None,**kwargs):
"""
"""
if savepath is not None:
assert(os.path.splitext(savepath)[-1] in ['.png','.pdf'])
# Load the data if not passed directly
data_obs = prepare_sweep_data(sweep_root_dir,data_obs,epoch,filetype='observed')
data_uno = prepare_sweep_data(sweep_root_dir,data_uno,epoch,filetype='unobserved')
bin_size = 0.1 # dex
dhelio_log_bins = np.arange(-1,3,0.1)
bin_r = 10**(dhelio_log_bins)
bin_volume = (4.0*np.pi/3.0)*(bin_r**3)
bin_shell_vol = bin_volume[1:]-bin_volume[:-1]
dhelio_obs = data_obs['d_helio']
dhelio_uno = data_uno['d_helio']
hist_obs, _ = np.histogram(np.log10(dhelio_obs),bins=dhelio_log_bins)
hist_uno, _ = np.histogram(np.log10(dhelio_uno),bins=dhelio_log_bins)
ratio = np.array(hist_obs,dtype=np.float64)/(hist_obs+hist_uno)
figure = pl.figure(figsize=(5,5))
axmain = pl.gca()
axtop = axmain.twiny()
pl.sca(axmain)
plot_kwargs = dict(c='k',
drawstyle='steps-post',
lw=1.5,
zorder=10,
label='All obs.')
plot_kwargs.update(**kwargs)
pl.plot(dhelio_log_bins[:-1],ratio,**plot_kwargs)
if split_pop and 'popid' in data_obs.dtype.names:
popids = np.unique(data_obs['popid'])
c = [pl.cm.viridis(i) for i in np.linspace(0,0.9,len(popids))]
for i,jpop in enumerate(popids):
if group_disc and jpop < 7:
continue
mask_obs = data_obs['popid'] == jpop
mask_uno = data_uno['popid'] == jpop
bin_midpoints = dhelio_log_bins[:-1] + 0.5*(dhelio_log_bins[1]-dhelio_log_bins[0])
hist_obs, _ = np.histogram(np.log10(dhelio_obs[mask_obs]),bins=dhelio_log_bins)
hist_uno, _ = np.histogram(np.log10(dhelio_uno[mask_uno]),bins=dhelio_log_bins)
ratio = np.array(hist_obs,dtype=np.float64)/(hist_obs+hist_uno)
plot_kwargs = dict(c=c[i],drawstyle='solid',label='Pop %d'%(jpop))
plot_kwargs.update(**kwargs)
pl.plot(bin_midpoints,ratio,**plot_kwargs)
if group_disc:
# All disk components
mask_obs = (data_obs['popid'] != 8) & (data_obs['popid'] != 7)
mask_uno = (data_uno['popid'] != 8) & (data_uno['popid'] != 7)
bin_midpoints = dhelio_log_bins[:-1] + 0.5*(dhelio_log_bins[1]-dhelio_log_bins[0])
hist_obs, _ = np.histogram(np.log10(dhelio_obs[mask_obs]),bins=dhelio_log_bins)
hist_uno, _ = np.histogram(np.log10(dhelio_uno[mask_uno]),bins=dhelio_log_bins)
ratio = np.array(hist_obs,dtype=np.float64)/(hist_obs+hist_uno)
plot_kwargs = dict(c='b',linestyle='--',label='Pop 0-6',lw=1.5)
pl.plot(bin_midpoints,ratio,**plot_kwargs)
pl.sca(axtop)
axtop.set_xlim(5*np.log10(0.1*1000.0)-5.0,5*np.log10(400*1000.0)-5.0)
axtop.set_ylim(0,max(0.5,ymax))
#pl.axvline(19.0,ls='--',c='grey',zorder=-20)
#pl.axvline(20.0,ls='--',c='grey',zorder=-20)
    pl.xlabel(r'$\mathtt{Distance\ Modulus}$',fontsize=12)
hide_end_ticklabels(pl.gca())
pl.sca(axmain)
leg = pl.legend(loc='upper right',fontsize=8,frameon=True,ncol=2)
leg.set_zorder(5)
frame = leg.get_frame()
frame.set_facecolor('white')
    pl.xlabel(r'$\mathtt{\log_{10} \, D_{helio}/kpc}$',fontsize=12)
pl.ylabel(r'$\mathtt{Fraction\ of\ targets\ observed}$',fontsize=12)
pl.xlim(np.log10(0.1),np.log10(400.0))
pl.ylim(0,max(0.5,ymax))
pl.grid(color='grey',linestyle=':')
hide_end_ticklabels(pl.gca())
pl.draw()
if savepath is not None:
pl.savefig(savepath,bbox_inches='tight',pad_inches=0.1)
print('Saved figure to {}'.format(savepath))
return
############################################################
def plot_epoch_distance(sweep_root_dir=None,data=None,epoch=0,
split_pop=True,
filetype='observed',
group_disc=False,
savepath=None,**kwargs):
"""
"""
if savepath is not None:
        assert(os.path.splitext(savepath)[-1] in ['.png','.pdf'])
# Load the data if not passed directly
data = prepare_sweep_data(sweep_root_dir,data,epoch,filetype=filetype)
# Heliocentric distance is in kpc
bin_size = 0.1 # dex
dhelio_log_bins = np.arange(-1,3,0.1)
dhelio = data['d_helio']
hist, _ = np.histogram(np.log10(dhelio),bins=dhelio_log_bins)
figure = pl.figure(figsize=(5,5))
plot_kwargs = dict(c='k',
drawstyle='steps-post',
lw=1.5,
zorder=10,
label='All')
plot_kwargs.update(**kwargs)
pl.plot(dhelio_log_bins[:-1],np.log10(hist),**plot_kwargs)
if split_pop and 'popid' in data.dtype.names:
popids = np.unique(data['popid'])
c = [pl.cm.viridis(i) for i in np.linspace(0,0.9,len(popids))]
for i,jpop in enumerate(popids):
if group_disc and jpop < 7:
continue
mask = data['popid'] == jpop
bin_midpoints = dhelio_log_bins[:-1] + 0.5*(dhelio_log_bins[1]-dhelio_log_bins[0])
hist, _ = np.histogram(np.log10(dhelio[mask]),bins=dhelio_log_bins)
plot_kwargs = dict(c=c[i],drawstyle='solid',label='Pop %d'%(jpop))
plot_kwargs.update(**kwargs)
pl.plot(bin_midpoints,np.log10(hist),**plot_kwargs)
if group_disc:
mask = (data['popid'] != 7) & (data['popid'] != 8)
bin_midpoints = dhelio_log_bins[:-1] + 0.5*(dhelio_log_bins[1]-dhelio_log_bins[0])
hist, _ = np.histogram(np.log10(dhelio[mask]),bins=dhelio_log_bins)
plot_kwargs = dict(c='b',linestyle='--',label='Pop 0-6',lw=1.5)
plot_kwargs.update(**kwargs)
pl.plot(bin_midpoints,np.log10(hist),**plot_kwargs)
pl.legend(loc='upper right',fontsize=8,frameon=False,ncol=2)
    pl.xlabel(r'$\mathtt{\log_{10} \, D_{helio}/kpc}$',fontsize=12)
pl.ylabel(r'$\mathtt{d\,\log_{10}\,N \,\, {[bins\, of\, %2.1f\, dex]}}$'%(bin_size),fontsize=12)
pl.xlim(-1,2.5)
pl.ylim(1,7)
pl.grid(color='grey',linestyle=':')
hide_end_ticklabels(pl.gca())
pl.draw()
if savepath is not None:
pl.savefig(savepath,bbox_inches='tight',pad_inches=0.1)
print('Saved figure to {}'.format(savepath))
return
############################################################
def plot_epoch_distance_cumulative(sweep_root_dir=None,data=None,epoch=0,
split_pop=True,filetype='observed',
group_disc=False,
savepath=None,**kwargs):
"""
"""
if savepath is not None:
        assert(os.path.splitext(savepath)[-1] in ['.png','.pdf'])
# Load the data if not passed directly
data = prepare_sweep_data(sweep_root_dir,data,epoch,filetype=filetype)
# Heliocentric distance is in kpc
dhelio = data['d_helio']
rsort = np.argsort(dhelio)
figure = pl.figure(figsize=(5,5))
axmain = pl.gca()
axtop = axmain.twiny()
plot_kwargs = dict(c='k',
drawstyle='solid',
lw=2,
zorder=10,
label='All')
plot_kwargs.update(**kwargs)
axmain.plot(np.log10(dhelio[rsort]),np.log10(len(dhelio)-np.arange(0,len(dhelio))),**plot_kwargs)
axtop.plot(5*np.log10(1000.0*dhelio[rsort])-5.0,np.log10(len(dhelio)-np.arange(0,len(dhelio))),**plot_kwargs)
pl.sca(axmain)
if split_pop and 'popid' in data.dtype.names:
popids = np.unique(data['popid'])
c = [pl.cm.viridis(i) for i in np.linspace(0,0.8,len(popids))]
for i,jpop in enumerate(popids):
if group_disc and jpop < 7:
continue
mask = data['popid'] == jpop
nmask = np.sum(mask)
dhelio = data['d_helio'][mask]
rsort = np.argsort(dhelio)
plot_kwargs = dict(c=c[i],linestyle='solid',label='Pop %d'%(jpop))
plot_kwargs.update(**kwargs)
pl.plot(np.log10(dhelio[rsort]),np.log10(nmask-np.arange(0,nmask)),**plot_kwargs)
if group_disc:
# All disk components
mask = (data['popid'] != 8) & (data['popid'] != 7)
nmask = np.sum(mask)
dhelio = data['d_helio'][mask]
rsort = np.argsort(dhelio)
plot_kwargs = dict(c='b',linestyle='--',label='Pop 0-6',lw=1.5)
plot_kwargs.update(**kwargs)
axmain.plot(np.log10(dhelio[rsort]),np.log10(nmask-np.arange(0,nmask)),**plot_kwargs)
pl.sca(axtop)
axtop.set_xlim(5*np.log10(0.1*1000.0)-5.0,5*np.log10(400*1000.0)-5.0)
axtop.set_ylim(2,7.5)
    axtop.set_xlabel(r'$\mathtt{Distance\ Modulus}$',fontsize=12)
axtop.set_yticklabels(axtop.get_yticks(),family='monospace')
hide_end_ticklabels(pl.gca())
pl.sca(axmain)
pl.legend(loc='upper right',fontsize=8,frameon=False,ncol=2,columnspacing=0.6)
    pl.xlabel(r'$\mathtt{\log_{10} \ D_{helio}/kpc}$',fontsize=12)
pl.ylabel(r'$\mathtt{\log_{10}\,N(>D)}$',fontsize=12)
axmain.set_yticklabels(axtop.get_yticks(),family='monospace')
pl.title('Observed Targets',y=1.12,fontsize=12)
axmain.set_xlim(np.log10(0.1),np.log10(400))
axmain.set_ylim(2,7.5)
pl.grid(color='grey',linestyle=':')
hide_end_ticklabels(pl.gca())
pl.draw()
if savepath is not None:
pl.savefig(savepath,bbox_inches='tight',pad_inches=0.1)
print('Saved figure to {}'.format(savepath))
return
############################################################
def plot_epoch_distance_ratio_cumulative(sweep_root_dir=None,
data_obs=None,data_uno=None,
epoch=0,
split_pop=True,group_disc=False,
savepath=None,**kwargs):
"""
"""
if savepath is not None:
        assert(os.path.splitext(savepath)[-1] in ['.png','.pdf'])
# Load the data if not passed directly
data_obs = prepare_sweep_data(sweep_root_dir,data_obs,epoch,filetype='observed')
data_uno = prepare_sweep_data(sweep_root_dir,data_uno,epoch,filetype='unobserved')
dhelio_obs = data_obs['d_helio']
rsort_obs = np.argsort(dhelio_obs)
n_obs = np.arange(1,len(rsort_obs)+1)
dhelio_uno = data_uno['d_helio']
rsort_uno = np.argsort(dhelio_uno)
n_uno = np.arange(1,len(rsort_uno)+1)
n_uno_at_robs = np.interp(dhelio_obs[rsort_obs],dhelio_uno[rsort_uno],n_uno)
    # Fraction of stars observed to a given distance.
ratio = n_obs/n_uno_at_robs
figure = pl.figure(figsize=(5,5))
axmain = pl.gca()
axtop = axmain.twiny()
plot_kwargs = dict(c='k',
drawstyle='solid',
lw=2,
zorder=10,
label='All')
plot_kwargs.update(**kwargs)
axmain.plot(np.log10(dhelio_obs[rsort_obs]), ratio,**plot_kwargs)
axtop.plot(5*np.log10(dhelio_obs[rsort_obs]*1000.0) - 5.0, ratio,**plot_kwargs)
pl.sca(axmain)
if split_pop and 'popid' in data_obs.dtype.names:
popids = np.unique(data_obs['popid'])
c = [pl.cm.viridis(i) for i in np.linspace(0,0.8,len(popids))]
for i,jpop in enumerate(popids):
if group_disc and jpop < 7:
continue
mask_obs = data_obs['popid'] == jpop
mask_uno = data_uno['popid'] == jpop
dhelio_obs = data_obs['d_helio'][mask_obs]
rsort_obs = np.argsort(dhelio_obs)
n_obs = np.arange(1,len(rsort_obs)+1)
dhelio_uno = data_uno['d_helio'][mask_uno]
rsort_uno = np.argsort(dhelio_uno)
n_uno = np.arange(1,len(rsort_uno)+1)
n_uno_at_robs = np.interp(dhelio_obs[rsort_obs],dhelio_uno[rsort_uno],n_uno)
ratio = n_obs/n_uno_at_robs
plot_kwargs = dict(c=c[i],linestyle='solid',label='Pop %d'%(jpop))
plot_kwargs.update(**kwargs)
axmain.plot(np.log10(dhelio_obs[rsort_obs]),ratio,**plot_kwargs)
if group_disc:
# All disk components
mask_obs = (data_obs['popid'] != 8) & (data_obs['popid'] != 7)
mask_uno = (data_uno['popid'] != 8) & (data_uno['popid'] != 7)
dhelio_obs = data_obs['d_helio'][mask_obs]
rsort_obs = np.argsort(dhelio_obs)
n_obs = np.arange(1,len(rsort_obs)+1)
dhelio_uno = data_uno['d_helio'][mask_uno]
rsort_uno = np.argsort(dhelio_uno)
n_uno = np.arange(1,len(rsort_uno)+1)
n_uno_at_robs = np.interp(dhelio_obs[rsort_obs],dhelio_uno[rsort_uno],n_uno)
ratio = n_obs/n_uno_at_robs
plot_kwargs = dict(c='b',linestyle='--',label='Pop 0-6',lw=1.5)
plot_kwargs.update(**kwargs)
axmain.plot(np.log10(dhelio_obs[rsort_obs]),ratio,**plot_kwargs)
pl.sca(axtop)
axtop.set_xlim(5*np.log10(0.1*1000.0)-5.0,5*np.log10(400*1000.0)-5.0)
axtop.set_ylim(0,1)
    axtop.set_xlabel(r'$\mathtt{Distance\ Modulus}$',fontsize=12)
axtop.set_yticklabels(axtop.get_yticks(),family='monospace')
hide_end_ticklabels(pl.gca())
pl.sca(axmain)
pl.legend(loc='upper right',fontsize=8,frameon=False,ncol=2,columnspacing=0.6)
    pl.xlabel(r'$\mathtt{\log_{10} \ D_{helio}/kpc}$',fontsize=12)
pl.ylabel(r'$\mathtt{N_{obs}(<D)/N_{tot}(<D)}$',fontsize=12)
axmain.set_yticklabels(axtop.get_yticks(),family='monospace')
pl.title('Observed/Total',y=1.12,fontsize=12)
axmain.set_xlim(np.log10(0.1),np.log10(400))
axmain.set_ylim(0,1)
pl.grid(color='grey',linestyle=':')
hide_end_ticklabels(pl.gca())
pl.draw()
if savepath is not None:
pl.savefig(savepath,bbox_inches='tight',pad_inches=0.1)
print('Saved figure to {}'.format(savepath))
return
############################################################
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('sweep_root_dir')
parser.add_argument('-e','--epoch',default=0, type=int)
parser.add_argument('-s','--savepath',default=None)
args = parser.parse_args()
    # plot_epoch_summary is not defined in this module; fall back to the
    # distance histogram plot as the command-line entry point.
    plot_epoch_distance(args.sweep_root_dir,epoch=args.epoch,
                        savepath=args.savepath)
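############################################################
# Example invocation (illustrative; the sweep directory is a placeholder):
#
#   python distance.py /path/to/sweep_mock_root --epoch 0 \
#       --savepath epoch0_distance.png
#
# This loads the per-epoch sweep data and writes the distance histogram
# figure to the given path.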
| {
"repo_name": "apcooper/bright_analysis",
"path": "py/bright_analysis/plots/distance.py",
"copies": "1",
"size": "15941",
"license": "bsd-3-clause",
"hash": 5249013871630803000,
"line_mean": 36.3325526932,
"line_max": 113,
"alpha_frac": 0.5477699015,
"autogenerated": false,
"ratio": 2.9908067542213885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40385766557213887,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import numpy as np
import kplr
from astropy.io import ascii
import h5py
from astropy.utils.console import ProgressBar
from astropy.utils.data import download_file
from astropy.table import Column, unique, join
__all__ = ['cache_light_curves', 'get_planets_table', 'cache_planets_table',
'planet_props', 'lc_archive']
kic_numbers_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, 'data', 'kics.csv')
planet_table_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, 'data', 'joined_table.csv')
light_curves_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, 'data', 'light_curves.hdf5')
stats_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.path.pardir, 'data', 'stats.hdf5')
def cache_light_curves():
"""
Run this after running `choose_targets.ipynb` in order to cache light curves
into a local HDF5 archive.
Examples
--------
>>> from salter import cache_light_curves; cache_light_curves()
"""
if os.path.exists(light_curves_path):
raise ValueError('Light curves file already exists, at {0}'
.format(light_curves_path))
if not os.path.exists(kic_numbers_path):
raise ValueError("You must first run the `choose_targets.ipynb` "
"notebook before running `cache_light_curves`")
kics = ascii.read(kic_numbers_path, format='no_header')['col1']
client = kplr.API()
# Create archive
f = h5py.File(light_curves_path, 'w')
with ProgressBar(len(kics)) as bar:
for kic in kics:
if str(kic) not in f.keys():
# Find a KIC
star = client.star(kic)
# Download the lightcurves for this KOI.
lightcurves = star.get_light_curves(short_cadence=False)
# Loop over the datasets and read in the data.
time, flux, ferr, quality, quarter = [], [], [], [], []
for i, lc in enumerate(lightcurves):
with lc.open() as lc_file:
# The lightcurve data are in the first FITS HDU.
hdu_data = lc_file[1].data
time.append(hdu_data["time"])
flux.append(hdu_data["sap_flux"])
ferr.append(hdu_data["sap_flux_err"])
quality.append(hdu_data["sap_quality"])
quarter.append(i * np.ones_like(hdu_data["time"]))
data = np.vstack(list(map(np.concatenate, [time, flux, ferr, quality, quarter]))).T
f.create_dataset(str(kic), data=data)
f.flush()
bar.update()
f.close()
def cache_planets_table():
"""
Cache a joined table containing data from the NASA Exoplanet Archive and
the Exoplanet Orbit Database.
To get the table, run the `~salter.get_planets_table()` function.
"""
NEA_URL = 'https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=cumulative'
EOD_URL = 'http://exoplanets.org/csv-files/exoplanets.csv'
nea_table = ascii.read(download_file(NEA_URL, cache=False))
eod_table = ascii.read(download_file(EOD_URL, cache=False))
eod_table2 = eod_table[~eod_table['KEPID'].mask]
nea_table2 = nea_table[~nea_table['kepid'].mask]
eod_table2.add_column(Column(eod_table2['KEPID'], 'kepid'))
joined_table = join(eod_table2, nea_table2, keys=['kepid'])
ascii.write(joined_table, planet_table_path, format='csv')
def get_planets_table():
"""
Get the joined planets table from the NASA Exoplanet Archive and
the Exoplanet Orbit Database.
Returns
-------
table : `~astropy.table.Table`
Table of exoplanet properties
"""
if not os.path.exists(planet_table_path):
raise ValueError("You must run salter.cache.cache_planets_table first "
"before you can run get_joined_table")
table = ascii.read(planet_table_path, format='csv')
# Toss out multis
first_kois_only = np.array([koi.endswith('01')
for koi in table['kepoi_name']])
table = table[first_kois_only]
table.add_index('kepid')
# Ensure only unique results
unique_table = unique(table, keys='kepid')
unique_table.add_index('kepid')
return unique_table
class PlanetProperties(object):
"""
Cache manager for planet properties table.
"""
def __init__(self):
self._table = None
@property
def table(self):
"""
Column definitions can be found at [1]_ and [2]_.
References
----------
.. [1] http://exoplanets.org/help/common/data
.. [2] https://exoplanetarchive.ipac.caltech.edu/docs/API_kepcandidate_columns.html
"""
if self._table is None:
self._table = get_planets_table()
return self._table
class LightCurveArchive(object):
"""
Light curve HDF5 archive manager
"""
def __init__(self):
self._file = None
@property
def file(self):
"""
Return an open HDF5 file stream of the light curve archive.
"""
if self._file is None:
self._file = h5py.File(light_curves_path, 'r')
return self._file
planet_props = PlanetProperties()
lc_archive = LightCurveArchive()
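# Illustrative usage sketch (not part of the original module). It assumes the
# caches above have already been built with cache_planets_table() and
# cache_light_curves(); the KIC number below is a hypothetical example.
def _example_read_cached(kic=757076):
    # Planet properties for one Kepler star, plus its light curve columns
    # (time, sap_flux, sap_flux_err, sap_quality, quarter) from the HDF5
    # archive written by cache_light_curves().
    row = planet_props.table.loc[kic]
    lc = lc_archive.file[str(kic)][:]
    time, flux = lc[:, 0], lc[:, 1]
    return row, time, flux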
| {
"repo_name": "bmorris3/salter",
"path": "salter/cache.py",
"copies": "1",
"size": "5676",
"license": "mit",
"hash": 3054766055242547700,
"line_mean": 31.8092485549,
"line_max": 104,
"alpha_frac": 0.5856236786,
"autogenerated": false,
"ratio": 3.5833333333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46689570119333335,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os.path
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import get_appsettings, setup_logging
from .. import model
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
model.Session.configure(bind=engine)
model.Base.metadata.drop_all(engine)
model.Base.metadata.create_all(engine)
with transaction.manager:
root_user = model.User(
name=u'Scott Torborg',
email='storborg@gmail.com',
)
root_user.update_password('test')
model.Session.add(root_user)
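# Illustrative note (not part of the original module): main() is intended to
# be exposed as a console-script entry point; the entry-point name and the
# settings file below are hypothetical.
#
#   initialize_warpworks_db development.ini
#
# The script drops and recreates every table before seeding the root user,
# so it should only be pointed at a development database.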
| {
"repo_name": "storborg/warpworks",
"path": "warpworks/scripts/initializedb.py",
"copies": "1",
"size": "1033",
"license": "mit",
"hash": 1671557267108904000,
"line_mean": 25.4871794872,
"line_max": 66,
"alpha_frac": 0.6437560503,
"autogenerated": false,
"ratio": 3.5016949152542374,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46454509655542375,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
import numpy as np
from .geom import geom
from matplotlib.patches import Rectangle
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
class geom_tile(geom):
DEFAULT_AES = {}
REQUIRED_AES = {'x', 'y', 'fill'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
_aes_renames = {}
_units = set()
def _plot_unit(self, pinfo, ax):
x = pinfo.pop('x')
y = pinfo.pop('y')
fill = pinfo.pop('fill')
# TODO: Fix this hack!
# Currently, if the fill is specified in the ggplot aes wrapper, ggplot
# will assign colors without regard to the fill values. This is okay for
# categorical maps but not heatmaps. At this stage in the pipeline the
# geom can't recover the original values.
#
# However, if the fill is specified in the geom_tile aes wrapper, the
# original fill values are sent unaltered, so we can make a heat map
# with the values.
# Was the fill specified in geom wrapper only? (i.e. not in ggplot)
if 'fill' in self.aes_unique_to_geom:
# Determine if there are non-numeric values.
            if not all(isinstance(v, (int, float, complex)) for v in set(fill)):
# No need to handle this case. Instruct the user to put categorical
# values in the ggplot wrapper.
raise Exception('For categorical fill values specify fill in the ggplot aes instead of the geom_tile aes.')
# All values are numeric so determine fill using colormap.
else:
fill_min = np.min(fill)
fill_max = np.max(fill)
if np.isnan(fill_min):
raise Exception('Fill values cannot contain NaN values.')
fill_rng = float(fill_max - fill_min)
fill_vals = (fill - fill_min) / fill_rng
cmap = self.gg.colormap(fill_vals.tolist())
fill = [colors.rgb2hex(c) for c in cmap[::, :3]]
df = pd.DataFrame(
{'x': x, 'y': y, 'fill': fill}).set_index(['x', 'y']).unstack(0)
# Setup axes.
x_ticks = range(2*len(set(x)) + 1)
y_ticks = range(2*len(set(y)) + 1)
x_indices = sorted(set(x))
y_indices = sorted(set(y))
# Setup box plotting parameters.
x_start = 0
y_start = 0
x_step = 2
y_step = 2
# Plot grid.
on_y = y_start
        for yi in range(len(y_indices)):
on_x = x_start
            for xi in range(len(x_indices)):
color = df.iloc[yi,xi]
if not isinstance(color, float):
ax.add_patch(Rectangle((on_x, on_y), x_step, y_step, facecolor=color))
on_x += x_step
on_y += y_step
# Draw the colorbar scale if drawing a heat map.
if 'cmap' in locals():
norm = colors.Normalize(vmin = fill_min, vmax = fill_max)
cax, kw = colorbar.make_axes(ax)
cax.hold(True)
colorbar.ColorbarBase(cax, cmap = self.gg.colormap, norm = norm)
# Set axis labels and ticks.
x_labels = ['']*(len(x_indices)+1)
for i,v in enumerate(x_indices): x_labels.insert(2*i+1, v)
y_labels = ['']*(len(y_indices)+1)
for i,v in enumerate(y_indices): y_labels.insert(2*i+1, v)
ax.set_xticklabels(x_labels)
ax.set_xticks(x_ticks)
ax.set_yticklabels(y_labels)
ax.set_yticks(y_ticks)
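# Illustrative sketch (not part of the original module): a numeric heat map.
# Per the note in _plot_unit, numeric fill values go in the geom_tile aes
# (categorical fills belong in the ggplot aes). The DataFrame and column
# names here are hypothetical.
def _example_heatmap(df):
    # Imported inside the function to avoid a circular import at module load.
    from ggplot import ggplot, aes
    return ggplot(aes(x='x', y='y'), data=df) + geom_tile(aes(fill='value'))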
| {
"repo_name": "udacity/ggplot",
"path": "ggplot/geoms/geom_tile.py",
"copies": "12",
"size": "3695",
"license": "bsd-2-clause",
"hash": -2008258174221249000,
"line_mean": 36.3232323232,
"line_max": 123,
"alpha_frac": 0.5548037889,
"autogenerated": false,
"ratio": 3.6620416253716552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
import os
import sys
_ROOT = os.path.abspath(os.path.dirname(__file__))
diamonds = pd.read_csv(os.path.join(_ROOT, "diamonds.csv"))
mtcars = pd.read_csv(os.path.join(_ROOT, "mtcars.csv"))
meat = pd.read_csv(os.path.join(_ROOT, "meat.csv"), parse_dates=[0])
movies = pd.read_csv(os.path.join(_ROOT, "movies.csv"))
pageviews = pd.read_csv(os.path.join(_ROOT, "pageviews.csv"), parse_dates=[0])
pigeons = pd.read_csv(os.path.join(_ROOT, "pigeons.csv"))
chopsticks = pd.read_csv(os.path.join(_ROOT, "chopsticks.csv"))
mpg = pd.read_csv(os.path.join(_ROOT, "mpg.csv"))
salmon = pd.read_csv(os.path.join(_ROOT, "salmon.csv"))
def load_world():
"""
Load world map data. This will return a data frame that contains
countries and their coordinate boundaries.
Examples
--------
>>> load_world().head()
country lat lng part country-part lat_proj lng_proj
0 Aruba 12.577582 -69.996938 0 Aruba0 206.255742 232.225312
1 Aruba 12.531724 -69.936391 0 Aruba0 206.369267 232.313402
2 Aruba 12.519232 -69.924672 0 Aruba0 206.391240 232.337395
3 Aruba 12.497016 -69.915761 0 Aruba0 206.407948 232.380064
4 Aruba 12.453559 -69.880198 0 Aruba0 206.474629 232.463517
>>> load_world().tail()
country lat lng part country-part lat_proj \
548651 Zimbabwe -15.619666 29.814283 0 Zimbabwe0 393.401781
548652 Zimbabwe -15.614808 29.837331 0 Zimbabwe0 393.444995
548653 Zimbabwe -15.618839 29.881773 0 Zimbabwe0 393.528323
548654 Zimbabwe -15.641473 29.967504 0 Zimbabwe0 393.689069
548655 Zimbabwe -15.646227 30.010654 0 Zimbabwe0 393.769975
lng_proj
548651 285.656522
548652 285.647065
548653 285.654913
548654 285.698982
548655 285.708239
"""
_DATA_DIR = os.path.join(os.path.expanduser("~"), ".ggplot")
if not os.path.exists(_DATA_DIR):
os.mkdir(_DATA_DIR)
f = os.path.join(_DATA_DIR, "world.csv")
if os.path.exists(f):
world = pd.read_csv(f)
else:
sys.stderr.write("downloading world data set...")
url = "https://raw.githubusercontent.com/yhat/ggplot/master/data/world.csv"
world = pd.read_csv(url)
world.to_csv(f, index=False)
sys.stderr.write("done!")
return world
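# Illustrative usage sketch (not part of the original module): the bundled
# data frames above are plain pandas DataFrames, and load_world() downloads
# and caches the world outline on first use. The 'class'/'hwy' columns are
# assumed from the standard mpg data set.
def _example_datasets():
    # Mean highway mileage per vehicle class, plus the first rows of the
    # cached world-map table.
    by_class = mpg.groupby('class')['hwy'].mean()
    world = load_world()
    return by_class, world.head()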
| {
"repo_name": "yhat/ggplot",
"path": "ggplot/datasets/__init__.py",
"copies": "1",
"size": "2552",
"license": "bsd-2-clause",
"hash": 8496658385016726000,
"line_mean": 40.8360655738,
"line_max": 83,
"alpha_frac": 0.6195141066,
"autogenerated": false,
"ratio": 2.6694560669456067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8783440639327548,
"avg_score": 0.0011059068436117616,
"num_lines": 61
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
from ggplot.utils import pop, make_iterable, make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
class stat_hline(stat):
DEFAULT_PARAMS = {'geom': 'hline', 'position': 'identity',
'yintercept': 0}
CREATES = {'yintercept'}
def _calculate(self, data):
y = pop(data, 'y', None)
# yintercept may be one of:
# - aesthetic to geom_hline or
# - parameter setting to stat_hline
yintercept = pop(data, 'yintercept', self.params['yintercept'])
if hasattr(yintercept, '__call__'):
if y is None:
raise GgplotError(
'To compute the intercept, y aesthetic is needed')
try:
yintercept = yintercept(y)
except TypeError as err:
raise GgplotError(*err.args)
yintercept = make_iterable(yintercept)
new_data = pd.DataFrame({'yintercept': yintercept})
# Copy the other aesthetics into the new dataframe
n = len(yintercept)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
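# Illustrative sketch (not part of the original module): yintercept may be a
# fixed value or a callable applied to the y aesthetic, e.g. a horizontal
# line at the mean of y. The expression below is a sketch that assumes geom
# parameters are forwarded to this stat; column names are hypothetical.
#
#   ggplot(aes(x='x', y='y'), data=df) + geom_point() + \
#       geom_hline(yintercept=np.mean)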
| {
"repo_name": "andnovar/ggplot",
"path": "ggplot/stats/stat_hline.py",
"copies": "12",
"size": "1317",
"license": "bsd-2-clause",
"hash": -8487484122100909000,
"line_mean": 33.6578947368,
"line_max": 71,
"alpha_frac": 0.5854214123,
"autogenerated": false,
"ratio": 3.9788519637462234,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 38
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
from .stat import stat
_MSG_LABELS = """There are more than 30 unique values mapped to x.
If you want a histogram instead, use 'geom_histogram()'.
"""
class stat_bar(stat):
REQUIRED_AES = {'x', 'y'}
DEFAULT_PARAMS = {'geom': 'bar', 'position': 'stack',
'width': 0.9, 'drop': False,
'origin': None, 'labels': None}
def _calculate(self, data):
# reorder x according to the labels
new_data = pd.DataFrame()
new_data["x"] = self.labels
for column in set(data.columns) - set('x'):
column_dict = dict(zip(data["x"],data[column]))
default = 0 if column == "y" else data[column].values[0]
new_data[column] = [column_dict.get(val, default)
for val in self.labels]
return new_data
def _calculate_global(self, data):
labels = self.params['labels']
        if labels is None:
labels = sorted(set(data['x'].values))
# For a lot of labels, put out a warning
if len(labels) > 30:
self._print_warning(_MSG_LABELS)
# Check if there is a mapping
self.labels = labels
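# Illustrative sketch (not part of the original module): the optional
# 'labels' parameter fixes the order of the bars, and categories missing
# from the data are filled with a zero count. The expression below is a
# sketch that assumes geom parameters are forwarded to this stat; column
# names are hypothetical.
#
#   ggplot(aes(x='grade', y='count'), data=df) + \
#       geom_bar(stat='identity', labels=['A', 'B', 'C', 'D'])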
| {
"repo_name": "benslice/ggplot",
"path": "ggplot/stats/stat_bar.py",
"copies": "12",
"size": "1322",
"license": "bsd-2-clause",
"hash": -834347888771891300,
"line_mean": 32.8974358974,
"line_max": 68,
"alpha_frac": 0.5514372163,
"autogenerated": false,
"ratio": 3.911242603550296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from .legend import get_labels
SHAPES = [
'o',#circle
'^',#triangle up
'D',#diamond
'v',#triangle down
'+',#plus
'x',#x
's',#square
'*',#star
'p',#pentagon
'*'#octagon
]
def shape_gen():
while True:
for shape in SHAPES:
yield shape
def assign_shapes(data, aes):
"""Assigns shapes to the given data based on the aes and adds the right legend
Parameters
----------
data : DataFrame
dataframe which should have shapes assigned to
aes : aesthetic
mapping, including a mapping from shapes to variable
Returns
-------
data : DataFrame
the changed dataframe
legend_entry : dict
An entry into the legend dictionary.
Documented in `components.legend`
"""
legend_entry = dict()
if 'shape' in aes:
shape_col = aes['shape']
shape = shape_gen()
labels, scale_type, indices = get_labels(data, shape_col, "discrete")
# marker in matplotlib are not unicode ready in 1.3.1 :-( -> use explicit str()...
shape_mapping = dict((value, str(six.next(shape))) for value in labels)
data[':::shape_mapping:::'] = data[shape_col].apply(
lambda x: shape_mapping[x])
legend_entry = {'column_name': shape_col,
'dict': dict((v, k) for k, v in shape_mapping.items()),
'scale_type': "discrete"}
return data, legend_entry
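# Illustrative sketch (not part of the original module): the DataFrame and
# the aes mapping below are hypothetical.
def _example_assign_shapes():
    import pandas as pd
    df = pd.DataFrame({'x': [1, 2, 3], 'y': [4, 5, 6],
                       'species': ['a', 'b', 'a']})
    # Adds a ':::shape_mapping:::' column of matplotlib markers and returns
    # a legend entry describing the mapping.
    shaped, legend_entry = assign_shapes(df, {'shape': 'species'})
    return shaped, legend_entry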
| {
"repo_name": "smblance/ggplot",
"path": "ggplot/components/shapes.py",
"copies": "12",
"size": "1576",
"license": "bsd-2-clause",
"hash": 3400730599622733300,
"line_mean": 26.649122807,
"line_max": 90,
"alpha_frac": 0.5742385787,
"autogenerated": false,
"ratio": 3.8627450980392157,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six import with_metaclass
import numpy as np
import itertools
from slicerator import Slicerator, propagate_attr, index_attr
from .frame import Frame
from abc import ABCMeta, abstractmethod, abstractproperty
from warnings import warn
class FramesStream(with_metaclass(ABCMeta, object)):
"""
A base class for wrapping input data which knows how to
advance to the next frame, but does not have random access.
The length does not need to be finite.
Does not support slicing.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __iter__(self):
pass
@abstractproperty
def pixel_type(self):
"""Returns a numpy.dtype for the data type of the pixel values"""
pass
@abstractproperty
def frame_shape(self):
"""Returns the shape of a single frame as a tuple ex (10, 12)"""
pass
@classmethod
def class_exts(cls):
"""
Return a set of the file extensions that this reader can deal with.
Sub-classes should over-ride this function to list what extensions
they deal with.
The default interpretation of the returned set is 'file
extensions including but not exclusively'.
"""
return set()
@property
def exts(self):
"""
Property to get the extensions of a FramesStream class.
Calls relevant classmethod.
"""
return type(self).class_exts()
def close(self):
"""
A method to clean up anything that need to be cleaned up.
Sub-classes should use super to call up the MRO stack and then
do any class-specific clean up
"""
pass
def _validate_process_func(self, process_func):
if process_func is None:
process_func = lambda x: x
if not callable(process_func):
raise ValueError("process_func must be a function, or None")
self.process_func = process_func
def _as_grey(self, as_grey, process_func):
# See skimage.color.colorconv in the scikit-image project.
# As noted there, the weights used in this conversion are calibrated
        # for contemporary CRT phosphors. Any alpha channel is ignored.
if as_grey:
if process_func is not None:
raise ValueError("The as_grey option cannot be used when "
"process_func is specified. Incorpate "
"greyscale conversion in the function "
"passed to process_func.")
shape = self.frame_shape
ndim = len(shape)
# Look for dimensions that look like color channels.
rgb_like = shape.count(3) == 1
rgba_like = shape.count(4) == 1
if ndim == 2:
# The image is already greyscale.
process_func = None
elif ndim == 3 and (rgb_like or rgba_like):
reduced_shape = list(shape)
if rgb_like:
color_axis_size = 3
calibration = [0.2125, 0.7154, 0.0721]
else:
color_axis_size = 4
calibration = [0.2125, 0.7154, 0.0721, 0]
reduced_shape.remove(color_axis_size)
self._im_sz = tuple(reduced_shape)
def convert_to_grey(img):
color_axis = img.shape.index(color_axis_size)
img = np.rollaxis(img, color_axis, 3)
grey = (img * calibration).sum(2)
return grey.astype(img.dtype) # coerce to original dtype
self.process_func = convert_to_grey
else:
raise NotImplementedError("I don't know how to convert an "
"image of shaped {0} to greyscale. "
"Write you own function and pass "
"it using the process_func "
"keyword argument.".format(shape))
# magic functions to make all sub-classes usable as context managers
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Frame Shape: {frame_shape!r}
Pixel Datatype: {dtype}""".format(frame_shape=self.frame_shape,
dtype=self.pixel_type)
@Slicerator.from_class
class FramesSequence(FramesStream):
"""Baseclass for wrapping data buckets that have random access.
Support random access.
Supports standard slicing and fancy slicing and returns a resliceable
Slicerator object.
Must be finite length.
"""
propagate_attrs = ['frame_shape', 'pixel_type']
def __getitem__(self, key):
"""__getitem__ is handled by Slicerator. In all pims readers, the data
returning function is get_frame."""
return self.get_frame(key)
def __iter__(self):
return iter(self[:])
@abstractmethod
def __len__(self):
"""
It is obligatory that sub-classes define a length.
"""
pass
@abstractmethod
def get_frame(self, ind):
"""
Sub classes must over-ride this function for how to get a given
frame out of the file. Any data-type specific internal-state
nonsense should be dealt with in this function.
"""
pass
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
h=self.frame_shape[1],
count=len(self),
dtype=self.pixel_type)
class FrameRewindableStream(FramesStream):
"""
A base class for holding the common code for
wrapping data sources that do not rewind easily.
"""
@abstractmethod
def rewind(self, j=0):
"""
Resets the stream to frame j
j : int
Frame to rewind the stream to
"""
pass
@abstractmethod
def skip_forward(self, j):
"""
Skip the stream forward by j frames.
j : int
Number of frames to skip
"""
pass
@abstractmethod
def next(self):
"""
return the next frame in the stream
"""
pass
@abstractmethod
def __len__(self):
pass
@abstractproperty
def current(self):
"""
The current location in the stream.
Can be an int if in stream or None if out the end.
"""
pass
def __iter__(self):
self.rewind(0)
return self
def __getitem__(self, arg):
"""
Returns a generator which yields frames
"""
if isinstance(arg, slice):
# get value from slice
start, stop, step = arg.start, arg.stop, arg.step
# sanitize step
if step is None:
step = 1
if step < 1:
raise ValueError("step must be positive")
# make sure the stream is in the right place to start
if start is None:
start = 0
if start < self.current:
self.rewind(start)
if start > self.current:
self.skip_forward(start - self.current)
# sanity check
if stop is not None and stop < start:
raise ValueError("start must be less than stop")
# special case, we can't just return self, because __iter__ rewinds
if step == 1 and stop is None:
# keep going until exhausted
return (self.next() for _ in itertools.repeat(True))
return self._step_gen(step, stop)
elif isinstance(arg, int):
self.rewind(arg)
return self.next()
else:
raise ValueError("Invalid argument, use either a `slice` or " +
"or an `int`. not {t}".format(t=str(type(arg))))
def _step_gen(self, step, stop):
"""
Wraps up the logic of stepping forward by step > 1
"""
while stop is None or self.current < stop:
yield self.next()
self.skip_forward(step - 1)
def __repr__(self):
# May be overwritten by subclasses
return """<Frames>
Length: {count} frames
Frame Shape: {w} x {h}
Pixel Datatype: {dtype}""".format(w=self.frame_shape[0],
h=self.frame_shape[1],
count=len(self),
dtype=self.pixel_type)
def _iter_attr(obj):
try:
for ns in [obj] + obj.__class__.mro():
for attr in ns.__dict__:
yield ns.__dict__[attr]
except AttributeError:
        return  # obj has no __dict__; a bare return ends the generator (PEP 479)
def _transpose(get_frame, expected_axes, desired_axes):
if list(expected_axes) == list(desired_axes):
return get_frame
else:
transposition = [expected_axes.index(a) for a in desired_axes]
def get_frame_T(**ind):
return get_frame(**ind).transpose(transposition)
return get_frame_T
def _bundle(get_frame, expected_axes, to_iter, sizes, dtype):
bundled_axes = to_iter + expected_axes
shape = [sizes[a] for a in bundled_axes]
iter_shape = shape[:len(to_iter)]
def get_frame_bundled(**ind):
result = np.empty(shape, dtype=dtype)
md_list = []
for indices in itertools.product(*[range(s) for s in iter_shape]):
ind.update({n: i for n, i in zip(to_iter, indices)})
frame = get_frame(**ind)
result[indices] = frame
if hasattr(frame, 'metadata'):
if frame.metadata is not None:
md_list.append(frame.metadata)
# propagate metadata
if len(md_list) == np.prod(iter_shape):
metadata = dict()
keys = md_list[0].keys()
for k in keys:
try:
metadata[k] = [row[k] for row in md_list]
except KeyError:
# if a field is not present in every frame, ignore it
                    warn('metadata field {} is not propagated'.format(k))
else:
# if all values are equal, only return one value
if metadata[k][1:] == metadata[k][:-1]:
metadata[k] = metadata[k][0]
else: # cast into ndarray
metadata[k] = np.array(metadata[k])
metadata[k].shape = iter_shape
else:
metadata = None
return Frame(result, metadata=metadata)
return get_frame_bundled, bundled_axes
def _drop(get_frame, expected_axes, to_drop):
# sort axes in descending order for correct function of np.take
to_drop_inds = [list(expected_axes).index(a) for a in to_drop]
indices = np.argsort(to_drop_inds)
axes = [to_drop_inds[i] for i in reversed(indices)]
to_drop = [to_drop[i] for i in reversed(indices)]
result_axes = [a for a in expected_axes if a not in to_drop]
def get_frame_dropped(**ind):
result = get_frame(**ind)
for (ax, name) in zip(axes, to_drop):
result = np.take(result, ind[name], axis=ax)
return result
return get_frame_dropped, result_axes
def _make_get_frame(result_axes, get_frame_dict, sizes, dtype):
methods = list(get_frame_dict.keys())
result_axes = [a for a in result_axes]
result_axes_set = set(result_axes)
# search for get_frame methods that return the right axes
for axes in methods:
if len(set(axes) ^ result_axes_set) == 0:
# _transpose does nothing when axes == result_axes
return _transpose(get_frame_dict[axes], axes, result_axes)
# we need either to drop axes or to iterate over axes:
# collect some numbers to decide what to do
arr = [None] * len(methods)
for i, method in enumerate(methods):
axes_set = set(method)
to_iter_set = result_axes_set - axes_set
to_iter = [x for x in result_axes if x in to_iter_set] # fix the order
n_iter = int(np.prod([sizes[ax] for ax in to_iter]))
to_drop = list(axes_set - result_axes_set)
n_drop = int(np.prod([sizes[ax] for ax in to_drop]))
arr[i] = [method, axes_set, to_iter, n_iter, to_drop, n_drop]
# try to read as less data as possible: try n_drop == 0
# sort in increasing number of iterations
arr.sort(key=lambda x: x[3])
for method, axes_set, to_iter, n_iter, to_drop, n_drop in arr:
if n_drop > 0:
continue
bundled_axes = to_iter + list(method)
get_frame, after_bundle = _bundle(get_frame_dict[method], method,
to_iter, sizes, dtype)
return _transpose(get_frame, bundled_axes, result_axes)
# try to iterate without dropping axes
# sort in increasing number of dropped frames
# TODO: sometimes dropping some data is better than having many iterations
arr.sort(key=lambda x: x[5])
for method, axes_set, to_iter, n_iter, to_drop, n_drop in arr:
if n_iter > 0:
continue
get_frame, after_drop = _drop(get_frame_dict[method], method, to_drop)
return _transpose(get_frame, after_drop, result_axes)
# worst case: all methods have both too many axes and require iteration
# take lowest number of dropped frames
# if indecisive, take lowest number of iterations
arr.sort(key=lambda x: (x[3], x[5]))
method, axes_set, to_iter, n_iter, to_drop, n_drop = arr[0]
get_frame, after_drop = _drop(get_frame_dict[method], method, to_drop)
get_frame, after_bundle = _bundle(get_frame, after_drop, to_iter,
sizes, dtype)
return _transpose(get_frame, after_bundle, result_axes)
class FramesSequenceND(FramesSequence):
""" A base class defining a FramesSequence with an arbitrary number of
axes. In the context of this reader base class, dimensions like 'x', 'y',
't' and 'z' will be called axes. Indices along these axes will be called
coordinates.
The properties `bundle_axes`, `iter_axes`, and `default_coords` define
to which coordinates each index points. See below for a description of
each attribute.
Subclassed readers only need to define `pixel_type` and `__init__`. At least
one reader method needs to be registered as such using
`self._register_get_frame(method, <list of axes>)`.
In the `__init__`, axes need to be initialized using `_init_axis(name, size)`.
It is recommended to set default values to `bundle_axes` and `iter_axes`.
The attributes `__len__`, `get_frame`, and the attributes below are defined
by this base_class; these should not be changed by derived classes.
Attributes
----------
axes : list of strings
List of all available axes
ndim : int
Number of image axes
sizes : dict of int
Dictionary with all axis sizes
frame_shape : tuple of int
Shape of frames that will be returned by get_frame
iter_axes : iterable of strings
This determines which axes will be iterated over by the FramesSequence.
The last element in will iterate fastest. Default [].
bundle_axes : iterable of strings
This determines which axes will be bundled into one Frame. The axes in
the ndarray that is returned by get_frame have the same order as the
order in this list. Default ['y', 'x'].
default_coords: dict of int
When an axis is not present in both iter_axes and bundle_axes, the
coordinate contained in this dictionary will be used. Default 0 for each.
Examples
--------
>>> class DummyReaderND(FramesSequenceND):
... @property
... def pixel_type(self):
... return 'uint8'
... def __init__(self, shape, **axes):
... super(DummyReaderND, self).__init__() # properly initialize
... self._init_axis('y', shape[0])
... self._init_axis('x', shape[1])
... for name in axes:
... self._init_axis(name, axes[name])
... self._register_get_frame(self.get_frame_2D, 'yx')
... self.bundle_axes = 'yx' # set default value
... if 't' in axes:
... self.iter_axes = 't' # set default value
... def get_frame_2D(self, **ind):
... return np.zeros((self.sizes['y'], self.sizes['x']),
... dtype=self.pixel_type)
    >>> frames = DummyReaderND((64, 64), t=80, c=2, z=10, m=5)
>>> frames.bundle_axes = 'czyx'
>>> frames.iter_axes = 't'
>>> frames.default_coords['m'] = 3
>>> frames[5] # returns Frame at T=5, M=3 with shape (2, 10, 64, 64)
"""
def __init__(self):
self._clear_axes()
self._get_frame_dict = dict()
def _register_get_frame(self, method, axes):
axes = tuple([a for a in axes])
if not hasattr(self, '_get_frame_dict'):
warn("Please call FramesSequenceND.__init__() at the start of the"
"the reader initialization.")
self._get_frame_dict = dict()
self._get_frame_dict[axes] = method
def _clear_axes(self):
self._sizes = {}
self._default_coords = {}
self._iter_axes = []
self._bundle_axes = ['y', 'x']
self._get_frame_wrapped = None
def _init_axis(self, name, size, default=0):
# check if the axes have been initialized, if not, do it here
if not hasattr(self, '_sizes'):
warn("Please call FramesSequenceND.__init__() at the start of the"
"the reader initialization.")
self._clear_axes()
self._get_frame_dict = dict()
if name in self._sizes:
raise ValueError("axis '{}' already exists".format(name))
self._sizes[name] = int(size)
self.default_coords[name] = int(default)
def __len__(self):
return int(np.prod([self._sizes[d] for d in self._iter_axes]))
@property
def frame_shape(self):
""" Returns the shape of the frame as returned by get_frame. """
return tuple([self._sizes[d] for d in self._bundle_axes])
@property
def axes(self):
""" Returns a list of all axes. """
return [k for k in self._sizes]
@property
def ndim(self):
""" Returns the number of axes. """
return len(self._sizes)
@property
def sizes(self):
""" Returns a dict of all axis sizes. """
return self._sizes
@property
def bundle_axes(self):
""" This determines which axes will be bundled into one Frame.
The ndarray that is returned by get_frame has the same axis order
as the order of `bundle_axes`.
"""
return self._bundle_axes
@bundle_axes.setter
def bundle_axes(self, value):
value = list(value)
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
for k in value:
if k in self._iter_axes:
del self._iter_axes[self._iter_axes.index(k)]
self._bundle_axes = value
if not hasattr(self, '_get_frame_dict'):
warn("Please call FramesSequenceND.__init__() at the start of the"
"the reader initialization.")
self._get_frame_dict = dict()
if len(self._get_frame_dict) == 0:
if hasattr(self, 'get_frame_2D'):
# include get_frame_2D for backwards compatibility
self._register_get_frame(self.get_frame_2D, 'yx')
else:
raise RuntimeError('No reader methods found. Register a reader '
'method with _register_get_frame')
# update the get_frame method
get_frame = _make_get_frame(self._bundle_axes, self._get_frame_dict,
self.sizes, self.pixel_type)
self._get_frame_wrapped = get_frame
@property
def iter_axes(self):
""" This determines which axes will be iterated over by the
FramesSequence. The last element will iterate fastest. """
return self._iter_axes
@iter_axes.setter
def iter_axes(self, value):
value = list(value)
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
for k in value:
if k in self._bundle_axes:
del self._bundle_axes[self._bundle_axes.index(k)]
self._iter_axes = value
@property
def default_coords(self):
""" When a axis is not present in both iter_axes and bundle_axes, the
coordinate contained in this dictionary will be used. """
return self._default_coords
@default_coords.setter
def default_coords(self, value):
invalid = [k for k in value if k not in self._sizes]
if invalid:
raise ValueError("axes %r do not exist" % invalid)
self._default_coords.update(**value)
def get_frame(self, i):
""" Returns a Frame of shape determined by bundle_axes. The index value
is interpreted according to the iter_axes property. Coordinates not
present in both iter_axes and bundle_axes will be set to their default
value (see default_coords). """
if i > len(self):
raise IndexError('index out of range')
if self._get_frame_wrapped is None:
self.bundle_axes = tuple(self.bundle_axes) # kick bundle_axes
# start with the default coordinates
coords = self.default_coords.copy()
# list sizes of iteration axes
iter_sizes = [self._sizes[k] for k in self.iter_axes]
# list how much i has to increase to get an increase of coordinate n
iter_cumsizes = np.append(np.cumprod(iter_sizes[::-1])[-2::-1], 1)
# calculate the coordinates and update the coords dictionary
iter_coords = (i // iter_cumsizes) % iter_sizes
coords.update(**{k: v for k, v in zip(self.iter_axes, iter_coords)})
result = self._get_frame_wrapped(**coords)
if hasattr(result, 'metadata'):
metadata = result.metadata
else:
metadata = dict()
metadata_axes = set(self.axes) - set(self.bundle_axes)
metadata_coords = {ax: coords[ax] for ax in metadata_axes}
metadata.update(dict(axes=self.bundle_axes, coords=metadata_coords))
return Frame(result, frame_no=i, metadata=metadata)
def __repr__(self):
s = "<FramesSequenceND>\nAxes: {0}\n".format(self.ndim)
for dim in self._sizes:
s += "Axis '{0}' size: {1}\n".format(dim, self._sizes[dim])
s += """Pixel Datatype: {dtype}""".format(dtype=self.pixel_type)
return s
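# Illustrative sketch (not part of the original module): a minimal reader
# built on FramesSequence. The in-memory image stack is hypothetical; real
# readers would pull frames from a file instead.
class _ArrayReader(FramesSequence):
    def __init__(self, stack):
        self._stack = np.asarray(stack)  # shape (n_frames, h, w)

    def get_frame(self, i):
        return Frame(self._stack[i], frame_no=i)

    def __len__(self):
        return len(self._stack)

    @property
    def frame_shape(self):
        return self._stack.shape[1:]

    @property
    def pixel_type(self):
        return self._stack.dtype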
| {
"repo_name": "tacaswell/pims",
"path": "pims/base_frames.py",
"copies": "1",
"size": "23446",
"license": "bsd-3-clause",
"hash": 2786667478823571500,
"line_mean": 35.4634525661,
"line_max": 82,
"alpha_frac": 0.5706730359,
"autogenerated": false,
"ratio": 4.092511782160936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5163184818060935,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip, range
from copy import copy
import itertools
import functools
from collections import deque
import numpy as np
from scipy.spatial import cKDTree
import pandas as pd
from .utils import print_update
from .try_numba import try_numba_autojit, NUMBA_AVAILABLE
class TreeFinder(object):
def __init__(self, points):
"""Takes a list of particles.
"""
self.points = copy(points)
self.rebuild()
def add_point(self, pt):
self.points.append(pt)
self._clean = False
def rebuild(self, coord_map=None):
"""Rebuilds tree from ``points`` attribute.
coord_map : function, optional
Called with a list of N Point instances, returns their
"effective" locations, as an N x d array (or list of tuples).
Used for prediction (see "predict" module).
rebuild() needs to be called after ``add_point()`` and
before tree is used for spatial queries again (i.e. when
memory is turned on).
"""
if coord_map is None:
coord_map = functools.partial(map, lambda x: x.pos)
coords = np.asarray(list(coord_map(self.points)))
if len(self.points) == 0:
raise ValueError('Frame (aka level) contains zero points')
self._kdtree = cKDTree(coords, 15)
# This could be tuned
self._clean = True
@property
def kdtree(self):
if not self._clean:
self.rebuild()
return self._kdtree
class HashTable(object):
"""Basic hash table for fast look up of particles in neighborhood.
Parameters
----------
dims : ND tuple
the range of the data to be put in the hash table.
0<data[k]<dims[k]
box_size : float
how big each box should be in data units.
The same scale is used for all dimensions
"""
class Out_of_hash_excpt(Exception):
"""
:py:exc:`Exception` for indicating that a particle is outside of the
valid range for this hash table."""
pass
def __init__(self, dims, box_size):
'''
Sets up the hash table
'''
# the dimensions of the data
self.dims = dims
# the size of boxes to use in the units of the data
self.box_size = box_size
self.hash_dims = np.ceil(np.array(dims) / box_size)
self.hash_table = [[] for j in range(int(np.prod(self.hash_dims)))]
# how many spatial dimensions
self.spat_dims = len(dims)
self.cached_shifts = None
self.cached_rrange = None
self.strides = np.cumprod(
np.concatenate(([1], self.hash_dims[1:])))[::-1]
def get_region(self, point, rrange):
'''
Returns all the particles within the region of maximum radius
rrange in data units. This may return Points that are farther
than rrange.
Parameters
----------
point : Point
point to find the features around
rrange: float
the size of the ball to search in data units.
'''
hash_size = self.hash_dims
center = np.floor(point.pos / self.box_size)
if any(center >= hash_size) or any(center < 0):
raise Hash_table.Out_of_hash_excpt("cord out of range")
rrange = int(np.ceil(rrange / self.box_size))
# check if we have already computed the shifts
if rrange == self.cached_rrange and self.cached_shifts is not None:
shifts = self.cached_shifts # if we have, use them
        # Otherwise, generate them
else:
if self.spat_dims == 2:
shifts = [np.array([j, k])
for j in range(-rrange, rrange + 1)
for k in range(-rrange, rrange + 1)]
elif self.spat_dims == 3:
shifts = [np.array([j, k, m])
for j in range(-rrange, rrange + 1)
for k in range(-rrange, rrange + 1)
for m in range(-rrange, rrange + 1)]
else:
raise NotImplementedError('only 2 and 3 dimensions implemented')
self.cached_rrange = rrange # and save them
self.cached_shifts = shifts
region = []
for s in shifts:
cord = center + s
if any(cord >= hash_size) or any(cord < 0):
continue
indx = int(sum(cord * self.strides))
region.extend(self.hash_table[indx])
return region
def add_point(self, point):
"""
Adds the `point` to the hash table.
        Assumes that :py:attr:`point.pos` exists and is array-like.
Parameters
----------
point : Point
object representing the feature to add to the hash table
"""
cord = np.floor(np.asarray(point.pos) / self.box_size)
hash_size = self.hash_dims
if any(cord >= hash_size) or any(cord < 0):
raise Hash_table.Out_of_hash_excpt("cord out of range")
indx = int(sum(cord * self.strides))
self.hash_table[indx].append(point)
class TrackUnstored(object):
"""
Base class for objects to represent linked tracks.
Includes logic for adding features to the track, but does
not store the track's particles in memory.
Parameters
----------
point : Point or None, optional
The first feature in the track
"""
count = 0
def __init__(self, point=None):
self.id = self.__class__.count
self.indx = self.id # redundant, but like trackpy
self.__class__.count += 1
if point is not None:
self.add_point(point)
def add_point(self, point):
point.add_to_track(self)
def incr_memory(self):
"""Mark this track as being remembered for one more frame.
For diagnostic purposes."""
try:
self._remembered += 1
except AttributeError:
self._remembered = 1
def report_memory(self):
"""Report and reset the memory counter (when a link is made).
For diagnostic purposes."""
try:
m = self._remembered
del self._remembered
return m
except AttributeError:
return 0
@classmethod
def reset_counter(cls, c=0):
cls.count = c
def __repr__(self):
return "<%s %d>" % (self.__class__.__name__, self.indx)
class Track(TrackUnstored):
'''
Base class for objects to represent linked tracks.
Includes logic for adding, removing features to the track. This can
be sub-classed to provide additional track level computation as
needed.
Parameters
----------
point : Point or None, optional
The first feature in the track
'''
count = 0
def __init__(self, point=None):
self.points = []
super(Track, self).__init__(point)
def __iter__(self):
return self.points.__iter__()
def __len__(self):
return len(self.points)
def __eq__(self, other):
return self.index == other.index
def __neq__(self, other):
return not self.__eq__(other)
__hash__ = None
def add_point(self, point):
'''
:param point: point to add
:type point: :py:class:`~trackpy.linking.Point`
Appends the point to this track. '''
self.points.append(point)
point.add_to_track(self)
def remove_point(self, point):
'''
:param point: point to remove from this track
:type point: :py:class:`~trackpy.linking.Point`
removes a point from this track'''
self.points.remove(point)
point._track = None
def last_point(self):
'''
:rtype: :py:class:`~trackpy.linking.Point`
Returns the last point on the track'''
return self.points[-1]
class Point(object):
'''
Base class for point (features) used in tracking. This class
contains all of the general stuff for interacting with
:py:class:`~trackpy.linking.Track` objects.
.. note:: To be used for tracking this class must be sub-classed to provide
a :py:meth:`distance` function. Child classes **MUST** call
:py:meth:`Point.__init__`. (See :py:class:`~trackpy.linking.PointND` for
example. )
'''
count = 0
def __init__(self):
self._track = None
self.uuid = Point.count # unique id for __hash__
Point.count += 1
# def __eq__(self, other):
# return self.uuid == other.uuid
# def __neq__(self, other):
# return not self.__eq__(other)
def add_to_track(self, track):
'''
:param track: the track to assign to this :py:class:`Point`
Sets the track of a :py:class:`Point` object. Raises
:py:exc:`Exception` if the object is already assigned a track.
'''
if self._track is not None:
raise Exception("trying to add a particle already in a track")
self._track = track
def remove_from_track(self, track):
'''
:param track: the track to disassociate from this :py:class:`Point`
Removes this point from the given track. Raises :py:exc:`Exception` if
particle not associated with the given track.
'''
if self._track != track:
raise Exception("Point not associated with given track")
track.remove_point(self)
def in_track(self):
'''
:rtype: bool
Returns if a point is associated with a track '''
return self._track is not None
@property
def track(self):
"""Returns the track that this :class:`Point` is in. May be `None` """
return self._track
class PointND(Point):
'''
Version of :class:`Point` for tracking in flat space with
non-periodic boundary conditions.
Parameters
----------
t : scalar
a time-like variable.
pos : array-like
position of feature
id : int, optional
external unique ID
'''
def __init__(self, t, pos, id=None):
Point.__init__(self) # initialize base class
self.t = t # time
self.pos = np.asarray(pos) # position in ND space
self.id = id
def distance(self, other_point):
'''
:param other_point: point to get distance to.
:type other_point: :py:class:`~trackpy.linking.Point`
Returns the absolute distance between this point and other_point
'''
return np.sqrt(np.sum((self.pos - other_point.pos) ** 2))
def __str__(self):
return "({t}, {p})".format(t=self.t, p=self.pos)
def __repr__(self):
coords = '(' + (', '.join(["{:.3f}"]*len(self.pos))).format(*self.pos) + ')'
track = " in Track %d" % self.track.indx if self.track else ""
return "<%s at %d, " % (self.__class__.__name__, self.t) + coords + track + ">"
class PointDiagnostics(object):
"""Mixin to add memory diagnostics collection to a Point object."""
def __init__(self, *args, **kwargs):
super(PointDiagnostics, self).__init__(*args, **kwargs)
self.diag = {}
def add_to_track(self, track):
super(PointDiagnostics, self).add_to_track(track)
# See the note in the memory section of Linker.link(). If this link
# is from memory, the track knows how many frames were skipped.
memcount = track.report_memory()
if memcount > 0:
self.diag['remembered'] = memcount
class PointNDDiagnostics(PointDiagnostics, PointND):
"""Version of :class:`PointND` that collects diagnostic information
during tracking.
"""
pass
def link(levels, search_range, hash_generator, memory=0, track_cls=None,
neighbor_strategy='BTree', link_strategy='recursive'):
"""Link features into trajectories, assigning a label to each trajectory.
This function is deprecated and lacks some recently-added options,
though it is still accurate. Use link_df or link_iter.
Parameters
----------
levels : iterable of iterables containing Points objects
e.g., a list containing lists with the Points in each frame
search_range : float
the maximum distance features can move between frames
hash_generator : a function that returns a HashTable
only used if neighbor_strategy is set to 'BTree' (default)
memory : integer
the maximum number of frames during which a feature can vanish,
        then reappear nearby, and be considered the same particle. 0 by default.
neighbor_strategy : {'BTree', 'KDTree'}
algorithm used to identify nearby features
link_strategy : {'recursive', 'nonrecursive', 'numba', 'drop', 'auto'}
algorithm used to resolve subnetworks of nearby particles
'auto' uses numba if available
'drop' causes particles in subnetworks to go unlinked
Returns
-------
tracks : list of Track (or track_cls) objects
See Also
--------
link_df, link_iter
"""
# An informative error to help newbies who go astray
if isinstance(levels, pd.DataFrame):
raise TypeError("Instead of link, use link_df, which accepts "
"pandas DataFrames.")
if track_cls is None:
track_cls = Track # stores Points
label_generator = link_iter(iter(levels), search_range, memory=memory,
neighbor_strategy=neighbor_strategy,
link_strategy=link_strategy,
track_cls=track_cls,
hash_generator=hash_generator)
labels = list(label_generator)
points = [level for level_list in levels for level in level_list] # flat
points = pd.Series(points)
labels = [label.track.indx for label_list in labels
for label in label_list] # flat
grouped = points.groupby(labels)
representative_points = grouped.first() # one point from each Track
tracks = representative_points.apply(lambda x: x.track)
return tracks
def link_df(features, search_range, memory=0,
neighbor_strategy='KDTree', link_strategy='auto',
predictor=None, adaptive_stop=None, adaptive_step=0.95,
copy_features=False, diagnostics=False, pos_columns=None,
t_column=None, hash_size=None, box_size=None,
verify_integrity=True, retain_index=False):
"""Link features into trajectories, assigning a label to each trajectory.
Parameters
----------
features : DataFrame
Must include any number of column(s) for position and a column of
frame numbers. By default, 'x' and 'y' are expected for position,
and 'frame' is expected for frame number. See below for options to use
custom column names. After linking, this DataFrame will contain a
'particle' column.
search_range : float
the maximum distance features can move between frames
memory : integer
the maximum number of frames during which a feature can vanish,
        then reappear nearby, and be considered the same particle. 0 by default.
neighbor_strategy : {'KDTree', 'BTree'}
algorithm used to identify nearby features
link_strategy : {'recursive', 'nonrecursive', 'numba', 'drop', 'auto'}
algorithm used to resolve subnetworks of nearby particles
'auto' uses numba if available
'drop' causes particles in subnetworks to go unlinked
predictor : function, optional
Improve performance by guessing where a particle will be in
the next frame.
For examples of how this works, see the "predict" module.
adaptive_stop : float, optional
If not None, when encountering an oversize subnet, retry by progressively
reducing search_range until the subnet is solvable. If search_range
becomes <= adaptive_stop, give up and raise a SubnetOversizeException.
adaptive_step : float, optional
Reduce search_range by multiplying it by this factor.
Returns
-------
trajectories : DataFrame
This is the input features DataFrame, now with a new column labeling
each particle with an ID number. This is not a copy; the original
features DataFrame is modified.
Other Parameters
----------------
copy_features : boolean
Leave the original features DataFrame intact (slower, uses more memory)
diagnostics : boolean
Collect details about how each particle was linked, and return as
columns in the output DataFrame. Implies copy=True.
pos_columns : DataFrame column names (unlimited dimensions)
Default is ['x', 'y']
t_column : DataFrame column name
Default is 'frame'
hash_size : sequence
For 'BTree' mode only. Define the shape of the search region.
If None (default), infer shape from range of data.
box_size : sequence
        For 'BTree' mode only. Define the partition size to optimize
performance. If None (default), the search_range is used, which is
a reasonable guess for best performance.
verify_integrity : boolean
False by default for fastest performance.
Use True if you suspect a bug in linking.
retain_index : boolean
By default, the index is reset to be sequential. To keep the original
index, set to True. Default is fine unless you devise a special use.
"""
# Assign defaults. (Do it here to avoid "mutable defaults" issue.)
if pos_columns is None:
pos_columns = ['x', 'y']
if t_column is None:
t_column = 'frame'
if hash_size is None:
MARGIN = 1 # avoid OutOfHashException
hash_size = features[pos_columns].max() + MARGIN
# Group the DataFrame by time steps and make a 'level' out of each
# one, using the index to keep track of Points.
if retain_index:
orig_index = features.index.copy() # Save it; restore it at the end.
features.reset_index(inplace=True, drop=True)
levels = (_build_level(frame, pos_columns, t_column,
diagnostics=diagnostics) for frame_no, frame
in features.groupby(t_column))
labeled_levels = link_iter(
levels, search_range, memory=memory, predictor=predictor,
adaptive_stop=adaptive_stop, adaptive_step=adaptive_step,
neighbor_strategy=neighbor_strategy, link_strategy=link_strategy,
hash_size=hash_size, box_size=box_size)
if diagnostics:
features = strip_diagnostics(features) # Makes a copy
elif copy_features:
features = features.copy()
# Do the tracking, and update the DataFrame after each iteration.
features['particle'] = np.nan # placeholder
for level in labeled_levels:
index = [x.id for x in level]
labels = pd.Series([x.track.id for x in level], index)
        frame_no = next(iter(level)).t # uses an arbitrary element from the set
if verify_integrity:
# This checks that the labeling is sane and tries
# to raise informatively if some unknown bug in linking
# produces a malformed labeling.
_verify_integrity(frame_no, labels)
# an additional check particular to link_df
if len(labels) > len(features[features[t_column] == frame_no]):
raise UnknownLinkingError("There are more labels than "
"particles to be labeled in Frame "
"%d".format(frame_no))
features['particle'].update(labels)
if diagnostics:
_add_diagnostic_columns(features, level)
msg = "Frame %d: %d trajectories present" % (frame_no, len(labels))
print_update(msg)
if retain_index:
features.index = orig_index
# And don't bother to sort -- user must be doing something special.
else:
features.sort(['particle', t_column], inplace=True)
features.reset_index(drop=True, inplace=True)
return features
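# Illustrative usage sketch (hypothetical data, not from the original module).
# The DataFrame below uses the default 'x', 'y', and 'frame' columns, e.g. as
# produced by a feature-finding step:
#
#     import pandas as pd
#     features = pd.DataFrame({'x': [1.0, 1.2, 9.0, 9.1],
#                              'y': [2.0, 2.1, 8.0, 8.2],
#                              'frame': [0, 1, 0, 1]})
#     linked = link_df(features, search_range=5, memory=1)
#     # 'linked' now carries a 'particle' column labeling two trajectories.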
def link_df_iter(features, search_range, memory=0,
neighbor_strategy='KDTree', link_strategy='auto',
predictor=None, adaptive_stop=None, adaptive_step=0.95,
diagnostics=False, pos_columns=None,
t_column=None, hash_size=None, box_size=None,
verify_integrity=True, retain_index=False):
"""Link features into trajectories, assigning a label to each trajectory.
Parameters
----------
features : iterable of DataFrames
Each DataFrame must include any number of column(s) for position and a
column of frame numbers. By default, 'x' and 'y' are expected for
position, and 'frame' is expected for frame number. See below for
options to use custom column names.
search_range : float
the maximum distance features can move between frames
memory : integer
the maximum number of frames during which a feature can vanish,
        then reappear nearby, and be considered the same particle. 0 by default.
neighbor_strategy : {'KDTree', 'BTree'}
algorithm used to identify nearby features. Note that when using
BTree, you must specify hash_size
link_strategy : {'recursive', 'nonrecursive', 'numba', 'drop', 'auto'}
algorithm used to resolve subnetworks of nearby particles
'auto' uses numba if available
'drop' causes particles in subnetworks to go unlinked
predictor : function, optional
Improve performance by guessing where a particle will be in the
next frame.
For examples of how this works, see the "predict" module.
adaptive_stop : float, optional
If not None, when encountering an oversize subnet, retry by progressively
reducing search_range until the subnet is solvable. If search_range
becomes <= adaptive_stop, give up and raise a SubnetOversizeException.
adaptive_step : float, optional
Reduce search_range by multiplying it by this factor.
Returns
-------
trajectories : DataFrame
This is the input features DataFrame, now with a new column labeling
each particle with an ID number for each frame.
Other Parameters
----------------
diagnostics : boolean
Collect details about how each particle was linked, and return as
columns in the output DataFrame.
pos_columns : DataFrame column names (unlimited dimensions)
Default is ['x', 'y']
t_column : DataFrame column name
Default is 'frame'
hash_size : sequence
For 'BTree' mode only. Define the shape of the search region.
box_size : sequence
        For 'BTree' mode only. Define the partition size to optimize
performance. If None (default), the search_range is used, which is
a reasonable guess for best performance.
verify_integrity : boolean
False by default, for fastest performance.
Use True if you suspect a bug in linking.
retain_index : boolean
By default, the index is reset to be sequential. To keep the original
index, set to True. Default is fine unless you devise a special use.
"""
# Assign defaults. (Do it here to avoid "mutable defaults" issue.)
if pos_columns is None:
pos_columns = ['x', 'y']
if t_column is None:
t_column = 'frame'
# Group the DataFrame by time steps and make a 'level' out of each
# one, using the index to keep track of Points.
# Non-destructively check the type of the first item of features
feature_iter, feature_checktype_iter = itertools.tee(iter(features))
try: # If it quacks like a DataFrame...
next(feature_checktype_iter).reset_index()
except AttributeError:
raise ValueError("Features data must be an iterable of DataFrames, one per "
"video frame. Use link_df() if you have a single DataFrame "
"describing multiple frames.")
del feature_checktype_iter # Otherwise pipes will back up.
# To allow retain_index
features_for_reset, features_forindex = itertools.tee(feature_iter)
index_iter = (fr.index.copy() for fr in features_forindex)
# To allow extra columns to be recovered later
features_forlinking, features_forpost = itertools.tee(
(frame.reset_index(drop=True) for frame in features_for_reset))
# make a generator over the frames
levels = (_build_level(frame, pos_columns, t_column, diagnostics=diagnostics)
for frame in features_forlinking)
# make a generator of the levels post-linking
labeled_levels = link_iter(
levels, search_range, memory=memory, predictor=predictor,
adaptive_stop=adaptive_stop, adaptive_step=adaptive_step,
neighbor_strategy=neighbor_strategy, link_strategy=link_strategy,
hash_size=hash_size, box_size=box_size)
# Re-assemble the features data, now with track labels and (if desired)
# the original index.
for labeled_level, source_features, old_index in zip(
labeled_levels, features_forpost, index_iter):
features = source_features.copy()
features['particle'] = np.nan # placeholder
index = [x.id for x in labeled_level]
labels = pd.Series([x.track.id for x in labeled_level], index)
        # uses an arbitrary element from the set
frame_no = next(iter(labeled_level)).t
if verify_integrity:
# This checks that the labeling is sane and tries
# to raise informatively if some unknown bug in linking
# produces a malformed labeling.
_verify_integrity(frame_no, labels)
# additional checks particular to link_df_iter
if not all(frame_no == source_features[t_column].values):
raise UnknownLinkingError("The features passed for Frame %d "
"do not all share the same frame "
"number.".format(frame_no))
if len(labels) > len(features):
raise UnknownLinkingError("There are more labels than "
"particles to be labeled in Frame "
"%d".format(frame_no))
features['particle'].update(labels)
if diagnostics:
_add_diagnostic_columns(features, labeled_level)
if retain_index:
features.index = old_index
# TODO: don't run index.copy() even when retain_index is false
else:
features.sort('particle', inplace=True)
features.reset_index(drop=True, inplace=True)
msg = "Frame %d: %d trajectories present" % (frame_no, len(labels))
print_update(msg)
yield features
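# Illustrative usage sketch (hypothetical generator of per-frame DataFrames;
# each DataFrame must contain a single frame number):
#
#     def frames():
#         for frame_no in range(3):
#             yield pd.DataFrame({'x': [1.0], 'y': [2.0], 'frame': [frame_no]})
#
#     for labeled in link_df_iter(frames(), search_range=5):
#         print(labeled[['frame', 'particle']])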
def _build_level(frame, pos_columns, t_column, diagnostics=False):
"""Return PointND objects for a DataFrame of points.
Parameters
----------
frame : DataFrame
Unlinked points data.
pos_columns : list
Names of position columns in "frame"
t_column : string
Name of time column in "frame"
diagnostics : boolean, optional
Whether resulting point objects should collect diagnostic information.
"""
if diagnostics:
point_cls = PointNDDiagnostics
else:
point_cls = PointND
return list(map(point_cls, frame[t_column],
frame[pos_columns].values, frame.index))
def _add_diagnostic_columns(features, level):
"""Copy the diagnostic information stored in each particle to the
corresponding columns in 'features'. Create columns as needed."""
diag = pd.DataFrame({x.id: x.diag for x in level}, dtype=object).T
diag.columns = ['diag_' + cn for cn in diag.columns]
for cn in diag.columns:
if cn not in features.columns:
features[cn] = pd.Series(np.nan, dtype=float, index=features.index)
features.update(diag)
def strip_diagnostics(tracks):
"""Remove diagnostic information from a tracks DataFrame.
This returns a copy of the DataFrame. Columns with names that start
with "diag_" are excluded."""
base_cols = [cn for cn in tracks.columns if not cn.startswith('diag_')]
return tracks.reindex(columns=base_cols)
class UnknownLinkingError(Exception):
pass
def _verify_integrity(frame_no, labels):
if labels.duplicated().sum() > 0:
raise UnknownLinkingError(
"There are two particles with the same label in Frame %d.".format(
frame_no))
if np.any(labels < 0):
raise UnknownLinkingError("Some particles were not labeled "
"in Frame %d.".format(frame_no))
def link_iter(levels, search_range, memory=0,
neighbor_strategy='KDTree', link_strategy='auto',
hash_size=None, box_size=None, predictor=None,
adaptive_stop=None, adaptive_step=0.95,
track_cls=None, hash_generator=None):
"""Link features into trajectories, assigning a label to each trajectory.
This function is a generator which yields at each step the Point
objects for the current level. These objects know what trajectory
they are in.
Parameters
----------
levels : iterable of iterables containing Points objects
e.g., a list containing lists with the Points in each frame
search_range : float
the maximum distance features can move between frames
memory : integer
the maximum number of frames during which a feature can vanish,
        then reappear nearby, and be considered the same particle. 0 by default.
neighbor_strategy : {'KDTree', 'BTree'}
algorithm used to identify nearby features
link_strategy : {'recursive', 'nonrecursive', 'numba', 'drop', 'auto'}
algorithm used to resolve subnetworks of nearby particles
'auto' uses numba if available
'drop' causes particles in subnetworks to go unlinked
predictor : function, optional
Improve performance by guessing where a particle will be in the
next frame.
For examples of how this works, see the "predict" module.
adaptive_stop : float, optional
If not None, when encountering an oversize subnet, retry by progressively
reducing search_range until the subnet is solvable. If search_range
becomes <= adaptive_stop, give up and raise a SubnetOversizeException.
adaptive_step : float, optional
Reduce search_range by multiplying it by this factor.
Returns
-------
cur_level : iterable of Point objects
The labeled points at each level.
Other Parameters
----------------
hash_size : sequence
For 'BTree' mode only. Define the shape of the search region.
(Higher-level wrappers of link infer this from the data.)
box_size : sequence
        For 'BTree' mode only. Define the partition size to optimize
performance. If None (default), the search_range is used, which is
a reasonable guess for best performance.
track_cls : class, optional
for special uses, you can specify a custom class that holds
each Track
hash_generator : function, optional
a function that returns a HashTable, included for legacy support.
        Specifying hash_size and box_size (above) fully defines a HashTable.
"""
linker = Linker(search_range, memory=memory, neighbor_strategy=neighbor_strategy,
link_strategy=link_strategy, hash_size=hash_size,
box_size=box_size, predictor=predictor,
adaptive_stop=adaptive_stop, adaptive_step=adaptive_step,
track_cls=track_cls, hash_generator=hash_generator)
return linker.link(levels)
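# Illustrative sketch using PointND objects directly (hypothetical data).
# Note that link_iter consumes an iterator, so pass iter(levels) when the
# levels are stored in a list:
#
#     levels = [[PointND(0, [0.0, 0.0]), PointND(0, [10.0, 10.0])],
#               [PointND(1, [0.4, 0.1]), PointND(1, [10.2, 9.9])]]
#     for level in link_iter(iter(levels), search_range=2.0):
#         for p in level:
#             print(p.t, p.pos, p.track.id)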
class Linker(object):
"""See link_iter() for a description of parameters."""
# Largest subnet we will attempt to solve.
MAX_SUB_NET_SIZE = 30
# For adaptive search, subnet linking should fail much faster.
MAX_SUB_NET_SIZE_ADAPTIVE = 15
def __init__(self, search_range, memory=0,
neighbor_strategy='KDTree', link_strategy='auto',
hash_size=None, box_size=None, predictor=None,
adaptive_stop=None, adaptive_step=0.95,
track_cls=None, hash_generator=None):
self.search_range = search_range
self.memory = memory
self.predictor = predictor
self.adaptive_stop = adaptive_stop
self.adaptive_step = adaptive_step
self.track_cls = track_cls
self.hash_generator = hash_generator
self.neighbor_strategy = neighbor_strategy
self.diag = False # Whether to save diagnostic info
if self.hash_generator is None:
if neighbor_strategy == 'BTree':
if hash_size is None:
raise ValueError("In 'BTree' mode, you must specify hash_size")
if box_size is None:
box_size = search_range
self.hash_generator = lambda: Hash_table(hash_size, box_size)
if self.track_cls is None:
self.track_cls = TrackUnstored # does not store Points
linkers = {'recursive': recursive_linker_obj,
'nonrecursive': nonrecursive_link,
'drop': drop_link}
if NUMBA_AVAILABLE:
linkers['numba'] = numba_link
linkers['auto'] = linkers['numba']
else:
linkers['auto'] = linkers['recursive']
try:
self.subnet_linker = linkers[link_strategy]
except KeyError:
raise ValueError("link_strategy must be one of: " + ', '.join(linkers.keys()))
if self.neighbor_strategy not in ['KDTree', 'BTree']:
raise ValueError("neighbor_strategy must be 'KDTree' or 'BTree'")
if self.adaptive_stop is not None:
if 1 * self.adaptive_stop <= 0:
raise ValueError("adaptive_stop must be positive.")
self.max_subnet_size = self.MAX_SUB_NET_SIZE_ADAPTIVE
else:
self.max_subnet_size = self.MAX_SUB_NET_SIZE
if 1 * self.adaptive_step <= 0 or 1 * self.adaptive_step >= 1:
raise ValueError("adaptive_step must be between "
"0 and 1 non-inclusive.")
self.subnet_counter = 0 # Unique ID for each subnet
def link(self, levels):
level_iter = iter(levels)
prev_level = next(level_iter)
prev_set = set(prev_level)
# Only save diagnostic info if it's possible. This saves
# 1-2% execution time and significant memory.
# We just check the first particle in the first level.
self.diag = hasattr(next(iter(prev_level)), 'diag')
# Make a Hash / Tree for the first level.
if self.neighbor_strategy == 'BTree':
prev_hash = self.hash_generator()
for p in prev_set:
prev_hash.add_point(p)
elif self.neighbor_strategy == 'KDTree':
prev_hash = TreeFinder(prev_level)
for p in prev_set:
p.forward_cands = []
try:
# Start ID numbers from zero, incompatible with multithreading.
self.track_cls.reset_counter()
except AttributeError:
# must be using a custom Track class without this method
pass
# Assume everything in first level starts a Track.
# Iterate over prev_level, not prev_set, because order -> track ID.
self.track_lst = [self.track_cls(p) for p in prev_level]
self.mem_set = set()
# Initialize memory with empty sets.
mem_history = []
for j in range(self.memory):
mem_history.append(set())
yield list(prev_set) # Short-circuit the loop on first call.
for cur_level in levels:
# Create the set for the destination level.
cur_set = set(cur_level)
tmp_set = set(cur_level) # copy used in next loop iteration
# First, a bit of unfinished business:
# If prediction is enabled, we need to update the positions in prev_hash
# to where we think they'll be in the frame corresponding to cur_level.
if self.predictor is not None:
# This only works for KDTree right now, because KDTree can store particle
# positions in a separate data structure from the PointND instances.
if not isinstance(prev_hash, TreeFinder):
raise NotImplementedError(
'Prediction works with the "KDTree" neighbor_strategy only.')
# Get the time of cur_level from its first particle
t_next = list(itertools.islice(cur_level, 0, 1))[0].t
targeted_predictor = functools.partial(self.predictor, t_next)
prev_hash.rebuild(coord_map=targeted_predictor) # Rewrite positions
# Now we can process the new particles.
# Make a Hash / Tree for the destination level.
if self.neighbor_strategy == 'BTree':
cur_hash = self.hash_generator()
for p in cur_set:
cur_hash.add_point(p)
elif self.neighbor_strategy == 'KDTree':
cur_hash = TreeFinder(cur_level)
# Set up attributes for keeping track of possible connections.
for p in cur_set:
p.back_cands = []
p.forward_cands = []
# Sort out what can go to what.
assign_candidates(cur_level, prev_hash, self.search_range,
self.neighbor_strategy)
# sort the candidate lists by distance
for p in cur_set:
p.back_cands.sort(key=lambda x: x[1])
for p in prev_set:
p.forward_cands.sort(key=lambda x: x[1])
# Note that this modifies cur_set, prev_set, but that's OK.
spl, dpl = self._assign_links(cur_set, prev_set, self.search_range)
new_mem_set = set()
for sp, dp in zip(spl, dpl):
# Do linking
if sp is not None and dp is not None:
sp.track.add_point(dp)
if sp in self.mem_set: # Very rare
self.mem_set.remove(sp)
elif sp is None:
# if unclaimed destination particle, a track is born!
self.track_lst.append(self.track_cls(dp))
elif dp is None:
# add the unmatched source particles to the new
# memory set
new_mem_set.add(sp)
# Clean up
if dp is not None:
del dp.back_cands
if sp is not None:
del sp.forward_cands
# set prev_hash to cur hash
prev_hash = cur_hash
# add in the memory points
# store the current level for use in next loop
if self.memory > 0:
# identify the new memory points
new_mem_set -= self.mem_set
mem_history.append(new_mem_set)
# remove points that are now too old
self.mem_set -= mem_history.pop(0)
# add the new points
self.mem_set |= new_mem_set
# add the memory particles to what will be the next source set
tmp_set |= self.mem_set
# add memory points to prev_hash (to be used as the next source)
for m in self.mem_set:
# add points to the hash
prev_hash.add_point(m)
# Record how many times this particle got "held back".
# Since this particle has already been yielded in a previous
# level, we can't store it there. We'll have to put it in the
# track object, then copy this info to the point in cur_hash
# if/when we make a link.
m.track.incr_memory()
# re-create the forward_cands list
m.forward_cands = []
prev_set = tmp_set
# TODO: Emit debug message with number of
# subnets in this level, numbers of new/remembered/lost particles
yield cur_level
def _assign_links(self, dest_set, source_set, search_range):
"""Match particles in dest_set with source_set.
Returns source, dest lists of equal length, corresponding
to pairs of source and destination particles. A 'None' value
denotes that a match was not found.
The contents of dest_set and source_set will be changed, as
well as the forward_cands and back_cands attributes of the
particles. However, this does not meaningfully change the state
within link(). All meaningful actions are taken within link(),
based on the recommendations of _assign_links().
"""
spl, dpl = [], []
diag = self.diag
# while there are particles left to link, link
while len(dest_set) > 0:
p = dest_set.pop()
bc_c = len(p.back_cands)
# no backwards candidates
if bc_c == 0:
# particle will get a new track
dpl.append(p)
spl.append(None)
if diag:
p.diag['search_range'] = search_range
continue # do next dest_set particle
if bc_c == 1:
# one backwards candidate
b_c_p = p.back_cands[0]
# and only one forward candidate
b_c_p_0 = b_c_p[0]
if len(b_c_p_0.forward_cands) == 1:
# schedule these particles for linking
dpl.append(p)
spl.append(b_c_p_0)
source_set.discard(b_c_p_0)
if diag:
p.diag['search_range'] = search_range
continue # do next dest_set particle
# we need to generate the sub networks
done_flg = False
s_sn = set() # source sub net
d_sn = set() # destination sub net
# add working particle to destination sub-net
d_sn.add(p)
while not done_flg:
d_sn_sz = len(d_sn)
s_sn_sz = len(s_sn)
for dp in d_sn:
for c_sp in dp.back_cands:
s_sn.add(c_sp[0])
source_set.discard(c_sp[0])
for sp in s_sn:
for c_dp in sp.forward_cands:
d_sn.add(c_dp[0])
dest_set.discard(c_dp[0])
done_flg = (len(d_sn) == d_sn_sz) and (len(s_sn) == s_sn_sz)
# add in penalty for not linking
for _s in s_sn:
# If we end up having to recurse for adaptive search, this final
# element will be dropped and re-added, because search_range is
# decreasing.
_s.forward_cands.append((None, search_range))
try:
sn_spl, sn_dpl = self.subnet_linker(s_sn, len(d_sn), search_range,
max_size=self.max_subnet_size,
diag=diag)
if diag:
# Record information about this invocation of the subnet linker.
for dp in d_sn:
dp.diag['subnet'] = self.subnet_counter
dp.diag['subnet_size'] = len(s_sn)
dp.diag['search_range'] = search_range
for dp in d_sn - set(sn_dpl):
# Unclaimed destination particle in subnet
sn_spl.append(None)
sn_dpl.append(dp)
self.subnet_counter += 1
except SubnetOversizeException:
if self.adaptive_stop is None:
raise
# Reduce search_range
new_range = search_range * self.adaptive_step
if search_range <= self.adaptive_stop:
# adaptive_stop is the search_range below which linking
# is presumed invalid. So we just give up.
raise
# Prune the candidate lists of s_sn, d_sn; then recurse.
for sp in s_sn:
sp.forward_cands = [fc for fc in sp.forward_cands
if fc[1] <= new_range]
for dp in d_sn:
dp.back_cands = [bc for bc in dp.back_cands
if bc[1] <= new_range]
sn_spl, sn_dpl = self._assign_links(
d_sn, s_sn, new_range)
spl.extend(sn_spl)
dpl.extend(sn_dpl)
# Leftovers
for pp in source_set:
spl.append(pp)
dpl.append(None)
return spl, dpl
def assign_candidates(cur_level, prev_hash, search_range, neighbor_strategy):
if neighbor_strategy == 'BTree':
# (Tom's code)
for p in cur_level:
work_box = prev_hash.get_region(p, search_range)
for wp in work_box:
d = p.distance(wp)
if d < search_range:
p.back_cands.append((wp, d))
wp.forward_cands.append((p, d))
elif neighbor_strategy == 'KDTree':
hashpts = prev_hash.points
cur_coords = np.array([x.pos for x in cur_level])
dists, inds = prev_hash.kdtree.query(cur_coords, 10, distance_upper_bound=search_range)
nn = np.sum(np.isfinite(dists), 1) # Number of neighbors of each particle
for i, p in enumerate(cur_level):
for j in range(nn[i]):
wp = hashpts[inds[i, j]]
p.back_cands.append((wp, dists[i, j]))
wp.forward_cands.append((p, dists[i, j]))
class SubnetOversizeException(Exception):
'''An :py:exc:`Exception` to be raised when the sub-nets are too big
to be efficiently linked. If you get this then either reduce your search range
or increase :py:attr:`Linker.MAX_SUB_NET_SIZE`'''
pass
def recursive_linker_obj(s_sn, dest_size, search_range, max_size=30, diag=False):
snl = sub_net_linker(s_sn, dest_size, search_range, max_size=max_size)
# In Python 3, we must convert to lists to return mutable collections.
return [list(particles) for particles in zip(*snl.best_pairs)]
class SubnetLinker(object):
"""A helper class for implementing the Crocker-Grier tracking
algorithm. This class handles the recursion code for the sub-net linking"""
def __init__(self, s_sn, dest_size, search_range, max_size=30):
# print 'made sub linker'
self.s_sn = s_sn
self.search_range = search_range
self.max_size = max_size
self.s_lst = [s for s in s_sn]
self.s_lst.sort(key=lambda x: len(x.forward_cands))
self.MAX = len(self.s_lst)
self.max_links = min(self.MAX, dest_size)
self.best_pairs = None
self.cur_pairs = deque([])
self.best_sum = np.Inf
self.d_taken = set()
self.cur_sum = 0
if self.MAX > self.max_size:
raise SubnetOversizeException("Subnetwork contains %d points"
% self.MAX)
# do the computation
self.do_recur(0)
def do_recur(self, j):
cur_s = self.s_lst[j]
for cur_d, dist in cur_s.forward_cands:
tmp_sum = self.cur_sum + dist**2
if tmp_sum > self.best_sum:
                # If we are already greater than the best sum, bail. We
                # can bail all the way out of this branch because all
                # the other possible connections (including the null
                # connection) are more expensive than the current
                # connection, so we can discard it without testing all
                # leaves down this branch.
return
if cur_d is not None and cur_d in self.d_taken:
# we have already used this destination point, bail
continue
# add this pair to the running list
self.cur_pairs.append((cur_s, cur_d))
# add the destination point to the exclusion list
if cur_d is not None:
self.d_taken.add(cur_d)
# update the current sum
self.cur_sum = tmp_sum
# buried base case
# if we have hit the end of s_lst and made it this far, it
# must be a better linking so save it.
if j + 1 == self.MAX:
tmp_sum = self.cur_sum + self.search_range**2 * (
self.max_links - len(self.d_taken))
if tmp_sum < self.best_sum:
self.best_sum = tmp_sum
self.best_pairs = list(self.cur_pairs)
else:
                # recurse!
self.do_recur(j + 1)
# remove this step from the working
self.cur_sum -= dist**2
if cur_d is not None:
self.d_taken.remove(cur_d)
self.cur_pairs.pop()
pass
def nonrecursive_link(source_list, dest_size, search_range, max_size=30, diag=False):
# print 'non-recursive', len(source_list), dest_size
source_list = list(source_list)
source_list.sort(key=lambda x: len(x.forward_cands))
MAX = len(source_list)
if MAX > max_size:
raise SubnetOversizeException("Subnetwork contains %d points" % MAX)
max_links = min(MAX, dest_size)
k_stack = deque([0])
j = 0
cur_back = deque([])
cur_sum_stack = deque([0])
best_sum = np.inf
best_back = None
cand_list_list = [c.forward_cands for c in source_list]
cand_lens = [len(c) for c in cand_list_list]
while j >= 0:
# grab everything from the end of the stack
cur_sum = cur_sum_stack[-1]
if j >= MAX:
# base case, no more source candidates,
# save the current configuration if it's better than the current max
# add penalty for not linking to particles in the destination set
tmp_sum = cur_sum + search_range**2 * (
max_links - len([d for d in cur_back if d is not None]))
if tmp_sum < best_sum:
                best_sum = tmp_sum
best_back = list(cur_back)
j -= 1
k_stack.pop()
cur_sum_stack.pop()
cur_back.pop()
# print 'we have a winner'
# print '-------------------------'
continue
# see if we have any forward candidates
k = k_stack[-1]
if k >= cand_lens[j]:
# no more candidates to try, this branch is done
j -= 1
k_stack.pop()
cur_sum_stack.pop()
if j >= 0:
cur_back.pop()
# print 'out of cands'
# print '-------------------------'
continue
# get the forward candidate
cur_d, cur_dist = cand_list_list[j][k]
tmp_sum = cur_sum + cur_dist**2
if tmp_sum > best_sum:
# nothing in this branch can do better than the current best
j -= 1
k_stack.pop()
cur_sum_stack.pop()
if j >= 0:
cur_back.pop()
# print 'total bail'
# print '-------------------------'
continue
# advance the counter in the k_stack, the next time this level
# of the frame stack is run the _next_ candidate will be run
k_stack[-1] += 1
# check if it's already linked
if cur_d is not None and cur_d in cur_back:
# this will run the loop with almost identical stack, but with advanced k
# print 'already linked cur_d'
# print '-------------------------'
continue
j += 1
k_stack.append(0)
cur_sum_stack.append(tmp_sum)
cur_back.append(cur_d)
# print '-------------------------'
# print 'done'
return source_list, best_back
def numba_link(s_sn, dest_size, search_range, max_size=30, diag=False):
"""Recursively find the optimal bonds for a group of particles between 2 frames.
This is only invoked when there is more than one possibility within
``search_range``.
Note that ``dest_size`` is unused; it is determined from the contents of
the source list.
"""
# The basic idea: replace Point objects with integer indices into lists of Points.
# Then the hard part runs quickly because it is just operating on arrays.
# We can compile it with numba for outstanding performance.
max_candidates = 9 # Max forward candidates we expect for any particle
src_net = list(s_sn)
nj = len(src_net) # j will index the source particles
if nj > max_size:
raise SubnetOversizeException('search_range (aka maxdisp) too large for reasonable performance '
'on these data (sub net contains %d points)' % nj)
# Build arrays of all destination (forward) candidates and their distances
dcands = set()
for p in src_net:
dcands.update([cand for cand, dist in p.forward_cands])
dcands = list(dcands)
dcands_map = {cand: i for i, cand in enumerate(dcands)}
# A source particle's actual candidates only take up the start of
# each row of the array. All other elements represent the null link option
# (i.e. particle lost)
candsarray = np.ones((nj, max_candidates + 1), dtype=np.int64) * -1
distsarray = np.ones((nj, max_candidates + 1), dtype=np.float64) * search_range
ncands = np.zeros((nj,), dtype=np.int64)
for j, sp in enumerate(src_net):
ncands[j] = len(sp.forward_cands)
if ncands[j] > max_candidates:
raise SubnetOversizeException('search_range (aka maxdisp) too large for reasonable performance '
'on these data (particle has %i forward candidates)' % ncands[j])
candsarray[j,:ncands[j]] = [dcands_map[cand] for cand, dist in sp.forward_cands]
distsarray[j,:ncands[j]] = [dist for cand, dist in sp.forward_cands]
# The assignments are persistent across levels of the recursion
best_assignments = np.ones((nj,), dtype=np.int64) * -1
cur_assignments = np.ones((nj,), dtype=np.int64) * -1
tmp_assignments = np.zeros((nj,), dtype=np.int64)
cur_sums = np.zeros((nj,), dtype=np.float64)
# In the next line, distsarray is passed in quadrature so that adding distances works.
loopcount = _numba_subnet_norecur(ncands, candsarray, distsarray**2, cur_assignments, cur_sums,
tmp_assignments, best_assignments)
if diag:
for dr in dcands:
try:
dr.diag['subnet_iterations'] = loopcount
except AttributeError:
pass # dr is "None" -- dropped particle
source_results = list(src_net)
dest_results = [dcands[i] if i >= 0 else None for i in best_assignments]
return source_results, dest_results
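# Illustrative note (hypothetical source particle with two candidates): with
# max_candidates = 9, forward candidates at distances 1.0 and 2.5 are encoded as
#     candsarray row: [c0, c1, -1, -1, -1, -1, -1, -1, -1, -1]
#     distsarray row: [1.0, 2.5, sr, sr, sr, sr, sr, sr, sr, sr]
# where c0, c1 index into dcands, sr = search_range, and -1 marks the null
# (particle-lost) option.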
@try_numba_autojit(nopython=True)
def _numba_subnet_norecur(ncands, candsarray, dists2array, cur_assignments,
cur_sums, tmp_assignments, best_assignments):
"""Find the optimal track assigments for a subnetwork, without recursion.
This is for nj source particles. All arguments are arrays with nj rows.
cur_assignments, tmp_assignments are just temporary registers of length nj.
best_assignments is modified in place.
Returns the number of assignments tested (at all levels). This is basically
proportional to time spent.
"""
nj = candsarray.shape[0]
tmp_sum = 0.
best_sum = 1.0e23
j = 0
loopcount = 0 # Keep track of iterations. This should be an int64.
while 1:
loopcount += 1
delta = 0 # What to do at the end
# This is an endless loop. We go up and down levels of recursion,
# and emulate the mechanics of nested "for" loops, using the
# blocks of code marked "GO UP" and "GO DOWN". It's not pretty.
# Load state from the "stack"
i = tmp_assignments[j]
#if j == 0:
# print i, j, best_sum
# sys.stdout.flush()
if i > ncands[j]:
# We've exhausted possibilities at this level, including the
# null link; make no more changes and go up a level
#### GO UP
delta = -1
else:
tmp_sum = cur_sums[j] + dists2array[j, i]
if tmp_sum > best_sum:
# if we are already greater than the best sum, bail. we
# can bail all the way out of this branch because all
# the other possible connections (including the null
# connection) are more expensive than the current
                # connection, thus we can discard without testing all
# leaves down this branch
#### GO UP
delta = -1
else:
# We have to seriously consider this candidate.
# We can have as many null links as we want, but the real particles are finite
# This loop looks inefficient but it's what numba wants!
flag = 0
for jtmp in range(nj):
if cur_assignments[jtmp] == candsarray[j, i]:
if jtmp < j:
flag = 1
if flag and candsarray[j, i] >= 0:
# we have already used this destination point; try the next one instead
delta = 0
else:
cur_assignments[j] = candsarray[j, i]
# OK, I guess we'll try this assignment
if j + 1 == nj:
# We have made assignments for all the particles,
# and we never exceeded the previous best_sum.
# This is our new optimum.
# print 'hit: %f' % best_sum
best_sum = tmp_sum
# This array is shared by all levels of recursion.
# If it's not touched again, it will be used once we
# get back to link_subnet
for jtmp in range(nj):
best_assignments[jtmp] = cur_assignments[jtmp]
#### GO UP
delta = -1
else:
# Try various assignments for the next particle
#### GO DOWN
delta = 1
if delta == -1:
if j > 0:
j += -1
tmp_assignments[j] += 1 # Try the next candidate at this higher level
continue
else:
return loopcount
elif delta == 1:
j += 1
cur_sums[j] = tmp_sum # Floor for all subsequent sums
tmp_assignments[j] = 0
else:
tmp_assignments[j] += 1
def drop_link(source_list, dest_size, search_range, max_size=30, diag=False):
"""Handle subnets by dropping particles.
This is an alternate "link_strategy", selected by specifying 'drop',
that simply refuses to solve the subnet. It ends the trajectories
represented in source_list, and results in a new trajectory for
each destination particle.
One possible use is to quickly test whether a given search_range will
result in a SubnetOversizeException."""
if len(source_list) > max_size:
raise SubnetOversizeException("Subnetwork contains %d points"
% len(source_list))
return [sp for sp in source_list], [None,] * len(source_list)
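# Illustrative sketch (hypothetical 'features' DataFrame): the 'drop' strategy
# can be used to quickly probe whether a given search_range produces an
# oversize subnet.
#
#     try:
#         link_df(features, search_range=15, link_strategy='drop')
#     except SubnetOversizeException:
#         print("search_range too large for these data")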
sub_net_linker = SubnetLinker # legacy
Hash_table = HashTable # legacy
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/linking.py",
"copies": "1",
"size": "60720",
"license": "bsd-3-clause",
"hash": -6796835265020981000,
"line_mean": 38.4798439532,
"line_max": 108,
"alpha_frac": 0.5808465086,
"autogenerated": false,
"ratio": 4.184700206753963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5265546715353963,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import collections
import functools
import re
import sys
import warnings
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from scipy import stats
import yaml
def fit_powerlaw(data, plot=True, **kwargs):
"""Fit a powerlaw by doing a linear regression in log space."""
ys = pd.DataFrame(data)
x = pd.Series(data.index.values, index=data.index, dtype=np.float64)
values = pd.DataFrame(index=['n', 'A'])
fits = {}
for col in ys:
y = ys[col].dropna()
slope, intercept, r, p, stderr = \
stats.linregress(np.log(x), np.log(y))
values[col] = [slope, np.exp(intercept)]
fits[col] = x.apply(lambda x: np.exp(intercept)*x**slope)
values = values.T
fits = pd.concat(fits, axis=1)
if plot:
from trackpy import plots
plots.fit(data, fits, logx=True, logy=True, legend=False, **kwargs)
return values
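# Illustrative sketch (synthetic data; names are hypothetical). A purely
# diffusive MSD ~ A * t**n should recover an exponent n close to 1:
#
#     lagt = np.arange(1, 11, dtype=np.float64)
#     msd = pd.Series(0.5 * lagt, index=lagt)
#     fit_powerlaw(msd, plot=False)  # DataFrame with columns 'n' and 'A'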
class memo(object):
"""Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
http://wiki.python.org/moin/PythonDecoratorLibrary#Memoize """
def __init__(self, func):
self.func = func
self.cache = {}
functools.update_wrapper(self, func)
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
warnings.warn("A memoization cache is being used on an uncacheable " +
"object. Proceeding by bypassing the cache.",
UserWarning)
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
# This code trips up numba. It's nice for development
# but it shouldn't matter for users.
# def __repr__(self):
# '''Return the function's docstring.'''
# return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
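# Illustrative sketch of the decorator (hypothetical function):
#
#     @memo
#     def square(x):
#         return x * x
#
#     square(3)  # computed and cached
#     square(3)  # returned from the cache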
def extract(pattern, string, group, convert=None):
"""Extract a pattern from a string. Optionally, convert it
to a desired type (float, timestamp, etc.) by specifying a function.
When the pattern is not found, gracefully return None."""
# group may be 1, (1,) or (1, 2).
if type(group) is int:
grp = (group,)
elif type(group) is tuple:
grp = group
    else:
        raise TypeError("The arg 'group' should be an int or a tuple.")
try:
result = re.search(pattern, string, re.DOTALL).group(*grp)
except AttributeError:
# For easy unpacking, when a tuple is expected, return a tuple of Nones.
return None if type(group) is int else (None,)*len(group)
return convert(result) if convert else result
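# Illustrative sketch:
#
#     extract(r'(\d+) fps', 'recorded at 30 fps', 1, convert=int)  # -> 30
#     extract(r'(\d+) fps', 'no frame rate here', 1)               # -> None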
def timestamp(ts_string):
"Convert a timestamp string to a datetime type."
if ts_string is None:
return None
return datetime.strptime(ts_string, '%Y-%m-%d %H:%M:%S')
def time_interval(raw):
"Convert a time interval string into a timedelta type."
if raw is None:
return None
m = re.match('([0-9][0-9]):([0-5][0-9]):([0-5][0-9])', raw)
h, m, s = map(int, m.group(1, 2, 3))
return timedelta(hours=h, minutes=m, seconds=s)
def suppress_plotting():
import matplotlib.pyplot as plt
plt.switch_backend('Agg') # does not plot to screen
# HH:MM:SS, H:MM:SS, MM:SS, M:SS all OK
lazy_timestamp_pat = r'\d?\d?:?\d?\d:\d\d'
# a time stamp followed by any text comment
ltp = lazy_timestamp_pat
video_log_pattern = r'(' + ltp + r')-?(' + ltp + r')? ?(RF)?(.+)?'
def lazy_timestamp(partial_timestamp):
"""Regularize a lazy timestamp like '0:37' -> '00:00:37'.
HH:MM:SS, H:MM:SS, MM:SS, and M:SS all OK.
Parameters
----------
partial_timestamp : string or other object
Returns
-------
regularized string
"""
if not isinstance(partial_timestamp, str):
# might be NaN or other unprocessable entry
return partial_timestamp
    input_format = r'\d?\d?:?\d?\d:\d\d'
if not re.match(input_format, partial_timestamp):
raise ValueError("Input string cannot be regularized.")
partial_digits = list(partial_timestamp)
digits = ['0', '0', ':', '0', '0', ':', '0', '0']
digits[-len(partial_digits):] = partial_digits
return ''.join(digits)
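# Illustrative sketch:
#
#     lazy_timestamp('0:37')     # -> '00:00:37'
#     lazy_timestamp('1:02:03')  # -> '01:02:03'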
def timedelta_to_frame(timedeltas, fps):
"""Convert timedelta times into frame numbers.
Parameters
----------
timedelta : DataFrame or Series of timedelta64 datatype
fps : frames per second (integer)
Result
------
DataFrame
Note
----
This sounds like a stupidly easy operation, but handling missing data
and multiplication is tricky with timedeltas.
"""
ns = timedeltas.values
seconds = ns * 1e-9
frame_numbers = seconds*fps
result = pd.DataFrame(frame_numbers, dtype=np.int64,
index=timedeltas.index, columns=timedeltas.columns)
result = result.where(timedeltas.notnull(), np.nan)
return result
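# Illustrative sketch (hypothetical data; exact dtype handling depends on the
# installed pandas version):
#
#     td = pd.DataFrame({'t': pd.to_timedelta(['1s', '2s'])})
#     timedelta_to_frame(td, fps=24)  # expected frame numbers 24 and 48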
def random_walk(N):
return np.cumsum(np.random.randn(N), 1)
def record_meta(meta_data, filename):
with open(filename, 'w') as output:
output.write(yaml.dump(meta_data, default_flow_style=False))
def validate_tuple(value, ndim):
if not hasattr(value, '__iter__'):
return (value,) * ndim
if len(value) == ndim:
return tuple(value)
raise ValueError("List length should have same length as image dimensions.")
try:
from IPython.core.display import clear_output
except ImportError:
pass
def print_update(message):
"Print a message immediately; do not wait for current execution to finish."
try:
clear_output()
except Exception:
pass
print(message)
sys.stdout.flush()
def make_pandas_strict():
"""Configure Pandas to raise an exception for "chained assignments."
This is useful during tests.
See http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
Does nothing for Pandas versions before 0.13.0.
"""
major, minor, micro = pd.__version__.split('.')
if major == '0' and int(minor) >= 13:
pd.set_option('mode.chained_assignment', 'raise')
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/utils.py",
"copies": "1",
"size": "6527",
"license": "bsd-3-clause",
"hash": -3926802136164843500,
"line_mean": 30.080952381,
"line_max": 91,
"alpha_frac": 0.6284663705,
"autogenerated": false,
"ratio": 3.683408577878104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9805279251016834,
"avg_score": 0.0013191394722540007,
"num_lines": 210
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import datetime
import logging
import uuid
from functools import wraps
from mongoengine import connect
#from metadatastore.document import Document
#from metadatastore.commands import (db_connect, db_disconnect, _ensure_connection,
# _normalize_object_id, _format_time)
from samplemanager import conf
from .util import new_uid
from .odm_templates import (Sample, SampleGroup, Location, Request, SMType)
logger = logging.getLogger(__name__)
def init_db():
"""
Initialize the SampleManager db with required entries.
"""
# basic Sample "Classes" (eg. mesh, pin; defines general handling procedures)
# basic Sample "Types" (eg. size1_mesh, spline_pin; defines specific params)
# Sample Group type
sample_group_type = SMType(uid='', name='sample_group', owner='system')
sample_group_type.save()
# "Named" "Samples" (calibration foils, alignment pins, etc)
# basic Container "Classes" (eg. puck, plate; defines general handling procedures)
# basic Container "Types" (eg. unipuck, standard_386_well_plate; defines specific params)
# "Named" "Containers" (eg. robot_dewer, containers that are part of the beamline)
# basic Request "Types"
# "Named" "Requests" (eg. beamline_alignment, etc)
# For often used, always the same (eg. parameterless), requests.
| {
"repo_name": "cowanml/samplemanager",
"path": "src/samplemanager/db_init.py",
"copies": "1",
"size": "1492",
"license": "bsd-3-clause",
"hash": 9182782875502606000,
"line_mean": 30.0833333333,
"line_max": 95,
"alpha_frac": 0.686997319,
"autogenerated": false,
"ratio": 3.845360824742268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5032358143742268,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import unittest
import nose
import warnings
import os
import numpy as np
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
import pandas
from pandas.util.testing import (assert_series_equal, assert_frame_equal)
import trackpy as tp
from pims import ImageSequence
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
path, _ = os.path.split(os.path.abspath(__file__))
# This six logic is here because pandas.HDFStore is fussy about the string type of one of
# its option args. There seems to be no good reason for that at all.
if six.PY2:
zlib = six.binary_type('zlib')
elif six.PY3:
zlib = 'zlib'
else:
raise("six is confused")
def _random_hash():
return ''.join(map(str, np.random.randint(0, 10, 10)))
def _skip_if_no_pytables():
try:
import tables
except ImportError:
raise nose.SkipTest('pytables not installed. Skipping.')
class FeatureSavingTester(object):
def prepare(self):
directory = os.path.join(path, 'video', 'image_sequence')
self.v = ImageSequence(os.path.join(directory, '*.png'))
# mass depends on pixel dtype, which differs per reader
minmass = self.v[0].max() * 2
self.PARAMS = {'diameter': 11, 'minmass': minmass, 'invert': True}
self.expected = tp.batch(self.v[[0, 1]], engine='python', meta=False,
**self.PARAMS)
def test_storage(self):
STORE_NAME = 'temp_for_testing_{0}.h5'.format(_random_hash())
if os.path.isfile(STORE_NAME):
os.remove(STORE_NAME)
try:
s = self.storage_class(STORE_NAME)
except IOError:
            raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
else:
tp.batch(self.v[[0, 1]], output=s, engine='python', meta=False,
**self.PARAMS)
self.assertEqual(len(s), 2)
self.assertEqual(s.max_frame, 1)
count_total_dumped = s.dump()['frame'].nunique()
count_one_dumped = s.dump(1)['frame'].nunique()
self.assertEqual(count_total_dumped, 2)
self.assertEqual(count_one_dumped, 1)
assert_frame_equal(s.dump().reset_index(drop=True),
self.expected.reset_index(drop=True))
assert_frame_equal(s[0], s.get(0))
# Putting an empty df should warn
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('ignore')
warnings.simplefilter('always', UserWarning)
s.put(pandas.DataFrame())
assert len(w) == 1
s.close()
os.remove(STORE_NAME)
class TestPandasHDFStore(FeatureSavingTester, unittest.TestCase):
def setUp(self):
_skip_if_no_pytables()
self.prepare()
self.storage_class = tp.PandasHDFStore
class TestPandasHDFStoreBig(FeatureSavingTester, unittest.TestCase):
def setUp(self):
_skip_if_no_pytables()
self.prepare()
self.storage_class = tp.PandasHDFStoreBig
def test_cache(self):
"""Store some frames, make a cache, then store some more frames."""
STORE_NAME = 'temp_for_testing_{0}.h5'.format(_random_hash())
if os.path.isfile(STORE_NAME):
os.remove(STORE_NAME)
try:
s = self.storage_class(STORE_NAME)
except IOError:
            raise nose.SkipTest('Cannot make an HDF5 file. Skipping')
else:
framedata = self.expected[self.expected.frame == 0]
def putfake(store, i):
fdat = framedata.copy()
fdat.frame = i
store.put(fdat)
for i in range(10): putfake(s, i)
assert s._frames_cache is None
s._flush_cache() # Should do nothing
assert set(range(10)) == set(s.frames) # Make cache
assert set(range(10)) == set(s.frames) # Hit memory cache
assert s._frames_cache is not None
assert s._cache_dirty
assert s._CACHE_NAME not in s.store
s._flush_cache()
assert s._CACHE_NAME in s.store
assert not s._cache_dirty
# Invalidate cache
for i in range(10, 20): putfake(s, i)
assert s._frames_cache is None
assert s._CACHE_NAME not in s.store
assert set(range(20)) == set(s.frames)
assert s._frames_cache is not None
s.rebuild_cache() # Just to try it
s.close() # Write cache
# Load cache from disk
s = self.storage_class(STORE_NAME, 'r')
assert set(range(20)) == set(s.frames) # Hit cache
assert not s._cache_dirty
s.close()
os.remove(STORE_NAME)
class TestPandasHDFStoreBigCompressed(FeatureSavingTester, unittest.TestCase):
def setUp(self):
_skip_if_no_pytables()
self.prepare()
self.storage_class = functools.partial(
tp.PandasHDFStoreBig, complevel=4, complib=zlib,
fletcher32=True)
class TestPandasHDFStoreSingleNode(FeatureSavingTester, unittest.TestCase):
def setUp(self):
_skip_if_no_pytables()
self.prepare()
self.storage_class = tp.PandasHDFStoreSingleNode
class TestPandasHDFStoreSingleNodeCompressed(FeatureSavingTester,
unittest.TestCase):
def setUp(self):
_skip_if_no_pytables()
self.prepare()
self.storage_class = functools.partial(
tp.PandasHDFStoreSingleNode,
complevel=4, complib=zlib, fletcher32=True)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/tests/test_feature_saving.py",
"copies": "2",
"size": "6019",
"license": "bsd-3-clause",
"hash": -7410639883974791000,
"line_mean": 33.0056497175,
"line_max": 89,
"alpha_frac": 0.5919587971,
"autogenerated": false,
"ratio": 3.8143219264892267,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0014548423610337743,
"num_lines": 177
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
from contextlib import contextmanager
from .fs import FileStore
from .conf import connection_config
from .core import DatumNotFound
logger = logging.getLogger(__name__)
_FS_SINGLETON = FileStore(connection_config)
def db_disconnect():
_FS_SINGLETON.disconnect()
def db_connect(database, host, port):
_FS_SINGLETON.reconfigure(dict(database=database,
host=host,
port=port))
assert _FS_SINGLETON.config['database'] == database
return _FS_SINGLETON._connection
@contextmanager
def handler_context(temp_handlers):
"""
Context manager for temporarily updating the global handler registry.
This is an alternative to passing a registry in
    as a kwarg. The global registry is returned to its prior state
after the context manager exits.
Parameters
----------
temp_handlers : dict
spec_name : HandlerClass pairs.
Examples
--------
To use a different handler for a call to `retrieve` use
the context manager to add (and possibly over-ride existing
handlers) temporarily:
    with handler_context({'syn-spec': SynHandler}):
FS.retrieve(EID)
"""
with _FS_SINGLETON.handler_context(temp_handlers) as fs:
yield fs
def register_handler(key, handler, overwrite=False):
"""
Register a handler to be associated with a specific file
specification key. This controls the dispatch to the
Handler classes based on the `spec` key of the `Resource`
documents.
Parameters
----------
key : str
Name of the spec as it will appear in the FS documents
handler : callable
        This needs to be a callable which, when called with the
        free parameters from the FS documents, returns an object that
        can retrieve the externally stored data.
overwrite : bool, optional
If False, raise an exception when re-registering an
existing key. Default is False
See Also
--------
`deregister_handler`
"""
_FS_SINGLETON.register_handler(key, handler, overwrite)
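# Illustrative sketch (hypothetical 'npy' spec and handler; the handler
# interface assumed here is __init__(resource_path, **resource_kwargs) plus
# __call__(**datum_kwargs)):
#
#     import numpy as np
#
#     class NpyHandler(object):
#         def __init__(self, resource_path):
#             self._data = np.load(resource_path, mmap_mode='r')
#         def __call__(self, index):
#             return self._data[index]
#
#     register_handler('npy', NpyHandler)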
def deregister_handler(key):
"""
Remove handler to module-level handler
Parameters
----------
key : str
The spec label to remove
See Also
--------
`register_handler`
"""
_FS_SINGLETON.deregister_handler(key)
def get_spec_handler(resource, handler_registry=None):
"""
    Given a document from the base FS collection, return
    the proper Handler.
    This should get memoized or shoved into a class eventually
to minimize open/close thrashing.
Parameters
----------
resource : ObjectId
ObjectId of a resource document
handler_registry : HandleRegistry or dict, optional
Mapping between spec <-> handler classes, if None, use
module-level registry
Returns
-------
handler : callable
An object that when called with the values in the event
document returns the externally stored data
"""
handler_registry = handler_registry if handler_registry is not None else {}
with _FS_SINGLETON.handler_context(handler_registry) as fs:
return fs.get_spec_handler(resource)
def get_data(eid, handler_registry=None):
"""
Given a document from the events collection, get the externally
stored data.
    This may get wrapped up in a class instance; it is not intended for
    public usage as-is.
Parameters
----------
eid : str
The datum ID (as stored in MDS)
handler_registry : HandleRegistry or dict, optional
Mapping between spec <-> handler classes, if None, use
module-level registry
Returns
-------
data : ndarray
The data in ndarray form.
"""
if handler_registry is None:
handler_registry = {}
with _FS_SINGLETON.handler_context(handler_registry) as fs:
return fs.get_datum(eid)
retrieve = get_data
def insert_resource(spec, resource_path, resource_kwargs=None):
"""
Parameters
----------
spec : str
spec used to determine what handler to use to open this
resource.
resource_path : str or None
Url to the physical location of this resource
resource_kwargs : dict
resource_kwargs name/value pairs of additional kwargs to be
passed to the handler to open this resource.
"""
resource_kwargs = resource_kwargs if resource_kwargs is not None else {}
return _FS_SINGLETON.insert_resource(spec, resource_path, resource_kwargs)
def insert_datum(resource, datum_id, datum_kwargs=None):
"""
Parameters
----------
resource : Resource or Resource.id
Resource object
datum_id : str
Unique identifier for this datum. This is the value stored in
metadatastore and is the value passed to `retrieve` to get
the data back out.
datum_kwargs : dict
dict with any kwargs needed to retrieve this specific datum from the
resource.
"""
datum_kwargs = datum_kwargs if datum_kwargs is not None else {}
return _FS_SINGLETON.insert_datum(resource, datum_id, datum_kwargs)
def bulk_insert_datum(resource, datum_ids, datum_kwarg_list):
return _FS_SINGLETON.bulk_insert_datum(resource, datum_ids,
datum_kwarg_list)
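# Illustrative end-to-end sketch (hypothetical spec, path, and datum ID;
# assumes a configured database connection and a registered handler for
# the 'npy' spec):
#
#     res = insert_resource('npy', '/tmp/data.npy')
#     insert_datum(res, 'datum-0001', {'index': 0})
#     arr = retrieve('datum-0001')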
| {
"repo_name": "stuwilkins/filestore",
"path": "filestore/api.py",
"copies": "1",
"size": "5432",
"license": "bsd-3-clause",
"hash": -1480551365377988900,
"line_mean": 25.1153846154,
"line_max": 79,
"alpha_frac": 0.6491163476,
"autogenerated": false,
"ratio": 4.398380566801619,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.554749691440162,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from scipy.ndimage.filters import uniform_filter1d
from scipy.ndimage.fourier import fourier_gaussian
from .utils import print_update, validate_tuple
# When loading module, try to use pyFFTW ("Fastest Fourier Transform in the
# West") if it is available.
try:
import pyfftw
except ImportError:
# Use numpy.
USING_FFTW = False
fftn = np.fft.fftn
ifftn = np.fft.ifftn
else:
USING_FFTW = True
pyfftw.interfaces.cache.enable()
planned = False
def fftn(a):
global planned
if not planned:
print_update("Note: FFTW is configuring itself. This will take " +
"several seconds, but subsequent calls will run " +
"*much* faster.")
planned = True
a = pyfftw.n_byte_align(a, a.dtype.alignment)
return pyfftw.interfaces.numpy_fft.fftn(a).astype(np.complex128)
def ifftn(a):
a = pyfftw.n_byte_align(a, a.dtype.alignment)
return pyfftw.interfaces.numpy_fft.ifftn(a)
def bandpass(image, lshort, llong, threshold=None):
"""Convolve with a Gaussian to remove short-wavelength noise,
and subtract out long-wavelength variations,
retaining features of intermediate scale.
    Parameters
    ----------
image : ndarray
lshort : small-scale cutoff (noise)
llong : large-scale cutoff
for both lshort and llong:
give a tuple value for different sizes per dimension
give int value for same value for all dimensions
        2*lshort must be smaller than llong, otherwise a ValueError is raised
threshold : float or integer
By default, 1 for integer images and 1/256. for float images.
Returns
-------
ndarray, the bandpassed image
"""
lshort = validate_tuple(lshort, image.ndim)
llong = validate_tuple(llong, image.ndim)
if np.any([x*2 >= y for (x, y) in zip(lshort, llong)]):
        raise ValueError("The smoothing length scale must be more "
                         "than twice the noise length scale.")
if threshold is None:
if np.issubdtype(image.dtype, np.integer):
threshold = 1
else:
threshold = 1/256.
# Perform a rolling average (boxcar) with kernel size = 2*llong + 1
boxcar = np.asarray(image)
for (axis, size) in enumerate(llong):
boxcar = uniform_filter1d(boxcar, size*2+1, axis, mode='nearest',
cval=0)
# Perform a gaussian filter
gaussian = ifftn(fourier_gaussian(fftn(image), lshort)).real
result = gaussian - boxcar
return np.where(result > threshold, result, 0)
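# Minimal usage sketch (added for illustration): remove pixel-scale noise and
# slowly varying background from a synthetic 8-bit image. The length scales
# (1 and 11 px) are arbitrary example values.
def _example_bandpass():
    noisy = np.random.randint(0, 255, (64, 64)).astype(np.uint8)
    return bandpass(noisy, lshort=1, llong=11)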
def scalefactor_to_gamut(image, original_dtype):
return np.iinfo(original_dtype).max / image.max()
def scale_to_gamut(image, original_dtype, scale_factor=None):
if scale_factor is None:
scale_factor = scalefactor_to_gamut(image, original_dtype)
scaled = (scale_factor * image.clip(min=0.)).astype(original_dtype)
return scaled
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/preprocessing.py",
"copies": "1",
"size": "3100",
"license": "bsd-3-clause",
"hash": 5275641183007240000,
"line_mean": 33.8314606742,
"line_max": 78,
"alpha_frac": 0.6432258065,
"autogenerated": false,
"ratio": 3.6904761904761907,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4833701996976191,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from scipy.ndimage import morphology
from pandas import DataFrame
from .preprocessing import bandpass
from .masks import binary_mask, x_squared_masks
from .utils import memo, validate_tuple
def roi(image, diameter, threshold=None, image_bandpassed=None):
"""Return a mask selecting the neighborhoods of bright regions.
See Biophysical journal 88(1) 623-638 Figure C.
Parameters
----------
image : ndarray
diameter : feature size used for centroid identification
threshold : number, optional
image_bandpassed : ndarray, optional
Returns
-------
boolean ndarray, True around bright regions
"""
diameter = validate_tuple(diameter, image.ndim)
if image_bandpassed is None:
image_bandpassed = bandpass(image, 1, tuple([d + 1 for d in diameter]),
threshold)
structure = binary_mask(tuple([int(d)//2 for d in diameter]), image.ndim)
signal_mask = morphology.binary_dilation(image_bandpassed,
structure=structure)
return signal_mask
def measure_noise(image, diameter, threshold, image_bandpassed=None):
"Compute the standard deviation of the dark pixels outside the signal."
signal_mask = roi(image, diameter, threshold, image_bandpassed)
return image[~signal_mask].mean(), image[~signal_mask].std()
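# Usage sketch (illustrative): estimate the background statistics of an image
# containing features of roughly 9 px diameter. ``image`` is supplied by the
# caller; the threshold value is an arbitrary example.
def _example_measure_noise(image):
    dark_mean, dark_std = measure_noise(image, diameter=9, threshold=1)
    return dark_std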
@memo
def _root_sum_x_squared(radius, ndim):
"Returns the root of the sum of all x^2 inside the mask for each dim."
masks = x_squared_masks(radius, ndim)
r2 = np.sum(masks, axis=tuple(range(1, ndim + 1))) # each ax except first
return np.sqrt(r2)
def _static_error(mass, noise, radius, noise_size):
coord_moments = _root_sum_x_squared(radius, len(radius))
N_S = noise / mass
if np.all(radius[1:] == radius[:-1]) and \
np.all(noise_size[1:] == noise_size[:-1]):
ep = N_S * noise_size[0] * coord_moments[0]
else:
ep = N_S[:, np.newaxis] * \
(np.array(noise_size) * np.array(coord_moments))[np.newaxis, :]
return ep
def static_error(features, noise, diameter, noise_size=1, ndim=2):
"""Compute the uncertainty in particle position ("the static error").
Parameters
----------
features : DataFrame of features
The feature dataframe should have a `mass` column that is already
background corrected.
noise : number or DataFrame having `noise` column, indexed on `frame`
standard deviation of the noise
diameter : number or tuple, feature diameter used to locate centroids
noise_size : noise correlation length, may be tuple-valued
ndim : number of image dimensions, default 2
if diameter is tuple-valued then its length will override ndim
Returns
-------
DataFrame of static error estimates, indexed like the features.
When either radius or noise_size are anisotropic, the returned DataFrame
contains one column for each dimension.
Where uncertainty estimation fails, NaN is returned.
Note
----
This is an adjusted version of the process described by Thierry Savin and
Patrick S. Doyle in their paper "Static and Dynamic Errors in Particle
Tracking Microrheology," Biophysical Journal 88(1) 623-638.
Instead of measuring the peak intensity of the feature and calculating the
total intensity (assuming a certain feature shape), the total intensity
(=mass) is summed directly from the data. This quantity is more robust
to noise and gives a better estimate of the static error.
In addition, the sum of squared coordinates is calculated by taking the
discrete sum instead of taking the continuous limit and integrating. This
makes it possible to generalize this analysis to anisotropic masks.
"""
if hasattr(diameter, '__iter__'):
ndim = len(diameter)
noise_size = validate_tuple(noise_size, ndim)[::-1]
diameter = validate_tuple(diameter, ndim)[::-1]
radius = tuple([d // 2 for d in diameter])
if np.isscalar(noise):
ep = _static_error(features['mass'], noise, radius, noise_size)
else:
assert 'noise' in noise
temp = features.join(noise, on='frame')
ep = _static_error(temp['mass'], temp['noise'], radius, noise_size)
ep = ep.where(ep > 0, np.nan)
if ep.ndim == 1:
ep.name = 'ep'
elif ep.ndim == 2:
if ndim < 4:
coord_columns = ['ep_x', 'ep_y', 'ep_z'][:ndim]
else:
coord_columns = map(lambda i: 'ep_x' + str(i), range(ndim))
ep = DataFrame(ep, columns=coord_columns, index=features.index)
return ep
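# Usage sketch (illustrative): estimate the static error for two features
# with a scalar noise level. The masses, noise and diameter are made-up
# example numbers, not recommendations.
def _example_static_error():
    features = DataFrame({'mass': [1200., 900.]})
    return static_error(features, noise=5., diameter=9)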
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/uncertainty.py",
"copies": "1",
"size": "4763",
"license": "bsd-3-clause",
"hash": 4732848947774414000,
"line_mean": 37.104,
"line_max": 79,
"alpha_frac": 0.6605080831,
"autogenerated": false,
"ratio": 3.920164609053498,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 125
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from scipy.spatial import cKDTree
from trackpy.utils import validate_tuple
def draw_point(image, pos, value):
image[tuple(pos)] = value
def feat_gauss(r, rg=0.333):
""" Gaussian at r = 0 with max value of 1. Its radius of gyration is
given by rg. """
return np.exp((r/rg)**2 * r.ndim/-2)
def feat_gauss_edge(r, value_at_edge=0.1):
""" Gaussian at r = 0 with max value of 1. Its value at r = 1 is given by
value_at_edge. """
return np.exp(np.log(value_at_edge)*r**2)
def feat_ring(r, r_at_max, value_at_edge=0.1):
""" Ring feature with a gaussian profile, centered at r_at_max. Its value
at r = 1 is given by value_at_edge."""
return np.exp(np.log(value_at_edge)*((r - r_at_max) / (1 - r_at_max))**2)
def feat_hat(r, disc_size, value_at_edge=0.1):
""" Solid disc of size disc_size, with Gaussian smoothed borders. """
mask = r > disc_size
spot = (~mask).astype(r.dtype)
spot[mask] = feat_ring(r[mask], disc_size, value_at_edge)
spot[~mask] = 1
return spot
def feat_step(r):
""" Solid disc. """
return r <= 1
def draw_feature(image, position, diameter, max_value=None,
feat_func=feat_gauss, ecc=None, **kwargs):
""" Draws a radial symmetric feature and adds it to the image at given
position. The given function will be evaluated at each pixel coordinate,
no averaging or convolution is done.
Parameters
----------
image : ndarray
image to draw features on
position : iterable
coordinates of feature position
diameter : number
defines the box that will be drawn on
max_value : number
maximum feature value. should be much less than the max value of the
image dtype, to avoid pixel wrapping at overlapping features
feat_func : function. Default: feat_gauss
function f(r) that takes an ndarray of radius values
and returns intensity values <= 1
ecc : positive number, optional
eccentricity of feature, defined only in 2D. Identical to setting
diameter to (diameter / (1 - ecc), diameter * (1 - ecc))
kwargs : keyword arguments are passed to feat_func
"""
if len(position) != image.ndim:
raise ValueError("Number of position coordinates should match image"
" dimensionality.")
diameter = validate_tuple(diameter, image.ndim)
if ecc is not None:
if len(diameter) != 2:
raise ValueError("Eccentricity is only defined in 2 dimensions")
if diameter[0] != diameter[1]:
raise ValueError("Diameter is already anisotropic; eccentricity is"
" not defined.")
diameter = (diameter[0] / (1 - ecc), diameter[1] * (1 - ecc))
radius = tuple([d / 2 for d in diameter])
if max_value is None:
max_value = np.iinfo(image.dtype).max - 3
rect = []
vectors = []
for (c, r, lim) in zip(position, radius, image.shape):
if (c >= lim) or (c < 0):
raise ValueError("Position outside of image.")
lower_bound = max(int(np.floor(c - r)), 0)
upper_bound = min(int(np.ceil(c + r + 1)), lim)
rect.append(slice(lower_bound, upper_bound))
vectors.append(np.arange(lower_bound - c, upper_bound - c) / r)
coords = np.meshgrid(*vectors, indexing='ij', sparse=True)
r = np.sqrt(np.sum(np.array(coords)**2, axis=0))
spot = max_value * feat_func(r, **kwargs)
image[rect] += spot.astype(image.dtype)
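# Usage sketch (illustrative): draw one Gaussian spot near the center of a
# blank 8-bit image. Position and diameter are arbitrary example values.
def _example_draw_feature():
    image = np.zeros((32, 32), dtype=np.uint8)
    draw_feature(image, (16., 16.), diameter=9)
    return image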
def gen_random_locations(shape, count, margin=0):
""" Generates `count` number of positions within `shape`. If a `margin` is
given, positions will be inside this margin. Margin may be tuple-valued.
"""
margin = validate_tuple(margin, len(shape))
np.random.seed(0)
pos = [np.random.randint(round(m), round(s - m), count)
for (s, m) in zip(shape, margin)]
return np.array(pos).T
def eliminate_overlapping_locations(f, separation):
""" Makes sure that no position is within `separation` from each other, by
deleting one of the that are to close to each other.
"""
separation = validate_tuple(separation, f.shape[1])
assert np.greater(separation, 0).all()
# Rescale positions, so that pairs are identified below a distance of 1.
f = f / separation
while True:
duplicates = cKDTree(f, 30).query_pairs(1)
if len(duplicates) == 0:
break
to_drop = []
for pair in duplicates:
to_drop.append(pair[1])
f = np.delete(f, to_drop, 0)
return f * separation
def gen_nonoverlapping_locations(shape, count, separation, margin=0):
""" Generates `count` number of positions within `shape`, that have minimum
distance `separation` from each other. The number of positions returned may
be lower than `count`, because positions too close to each other will be
    deleted. If a `margin` is given, positions will be at least `margin` away
    from the edges of the image. Margin may be tuple-valued.
"""
positions = gen_random_locations(shape, count, margin)
return eliminate_overlapping_locations(positions, separation)
def draw_spots(shape, positions, diameter, noise_level=0, bitdepth=8,
feat_func=feat_gauss, ecc=None, **kwargs):
""" Generates an image with features at given positions. A feature with
position x will be centered around pixel x. In other words, the origin of
the output image is located at the center of pixel (0, 0).
Parameters
----------
shape : tuple of int
the shape of the produced image
positions : iterable of tuples
an iterable of positions
diameter : number or tuple
the sizes of the box that will be used per feature. The actual feature
'size' is determined by feat_func and kwargs given to feat_func.
noise_level : int, default: 0
white noise will be generated up to this level
bitdepth : int, default: 8
the desired bitdepth of the image (<=32 bits)
feat_func : function, default: feat_gauss
function f(r) that takes an ndarray of radius values
and returns intensity values <= 1
ecc : positive number, optional
eccentricity of feature, defined only in 2D. Identical to setting
diameter to (diameter / (1 - ecc), diameter * (1 - ecc))
kwargs : keyword arguments are passed to feat_func
"""
if bitdepth <= 8:
dtype = np.uint8
internaldtype = np.uint16
elif bitdepth <= 16:
dtype = np.uint16
internaldtype = np.uint32
elif bitdepth <= 32:
dtype = np.uint32
internaldtype = np.uint64
else:
raise ValueError('Bitdepth should be <= 32')
np.random.seed(0)
image = np.random.randint(0, noise_level + 1, shape).astype(internaldtype)
for pos in positions:
draw_feature(image, pos, diameter, max_value=2**bitdepth - 1,
feat_func=feat_func, ecc=ecc, **kwargs)
return image.clip(0, 2**bitdepth - 1).astype(dtype)
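# Usage sketch (illustrative): build a small synthetic test image with
# non-overlapping Gaussian features and some background noise. All numbers
# are arbitrary example values.
def _example_test_image():
    positions = gen_nonoverlapping_locations((64, 64), count=10,
                                             separation=15, margin=10)
    return draw_spots((64, 64), positions, diameter=9, noise_level=5)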
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/artificial.py",
"copies": "2",
"size": "7155",
"license": "bsd-3-clause",
"hash": 8591313168840874000,
"line_mean": 38.0983606557,
"line_max": 79,
"alpha_frac": 0.6359189378,
"autogenerated": false,
"ratio": 3.67299794661191,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.530891688441191,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .try_numba import try_numba_autojit
@try_numba_autojit(nopython=True)
def _numba_refine_2D(raw_image, image, radiusY, radiusX, coords, N,
max_iterations, shapeY, shapeX, maskY, maskX, N_mask,
results):
SHIFT_THRESH = 0.6
GOOD_ENOUGH_THRESH = 0.01
# Column indices into the 'results' array
MASS_COL = 2
upper_boundY = shapeY - radiusY - 1
upper_boundX = shapeX - radiusX - 1
for feat in range(N):
# Define the circular neighborhood of (x, y).
coordY = coords[feat, 0]
coordX = coords[feat, 1]
cm_nY = 0.
cm_nX = 0.
squareY = int(round(coordY)) - radiusY
squareX = int(round(coordX)) - radiusX
mass_ = 0.0
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nY /= mass_
cm_nX /= mass_
cm_iY = cm_nY - radiusY + coordY
cm_iX = cm_nX - radiusX + coordX
allow_moves = True
for iteration in range(max_iterations):
off_centerY = cm_nY - radiusY
off_centerX = cm_nX - radiusX
if (abs(off_centerY) < GOOD_ENOUGH_THRESH and
abs(off_centerX) < GOOD_ENOUGH_THRESH):
break # Go to next feature
# If we're off by more than half a pixel in any direction, move.
do_move = False
if allow_moves and (abs(off_centerY) > SHIFT_THRESH or
abs(off_centerX) > SHIFT_THRESH):
do_move = True
if do_move:
# In here, coord is an integer.
new_coordY = int(round(coordY))
new_coordX = int(round(coordX))
oc = off_centerY
if oc > SHIFT_THRESH:
new_coordY += 1
elif oc < - SHIFT_THRESH:
new_coordY -= 1
oc = off_centerX
if oc > SHIFT_THRESH:
new_coordX += 1
elif oc < - SHIFT_THRESH:
new_coordX -= 1
# Don't move outside the image!
if new_coordY < radiusY:
new_coordY = radiusY
if new_coordX < radiusX:
new_coordX = radiusX
if new_coordY > upper_boundY:
new_coordY = upper_boundY
if new_coordX > upper_boundX:
new_coordX = upper_boundX
# Update slice to shifted position.
squareY = new_coordY - radiusY
squareX = new_coordX - radiusX
cm_nY = 0.
cm_nX = 0.
# If we're off by less than half a pixel, interpolate.
else:
break
# TODO Implement this for numba.
# Remember to zero cm_n somewhere in here.
# Here, coord is a float. We are off the grid.
# neighborhood = ndimage.shift(neighborhood, -off_center,
# order=2, mode='constant', cval=0)
# new_coord = np.float_(coord) + off_center
# Disallow any whole-pixels moves on future iterations.
# allow_moves = False
# cm_n was re-zeroed above in an unrelated loop
mass_ = 0.
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nY /= mass_
cm_nX /= mass_
cm_iY = cm_nY - radiusY + new_coordY
cm_iX = cm_nX - radiusX + new_coordX
coordY = new_coordY
coordX = new_coordX
# matplotlib and ndimage have opposite conventions for xy <-> yx.
results[feat, 0] = cm_iX
results[feat, 1] = cm_iY
# Characterize the neighborhood of our final centroid.
mass_ = 0.
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
mass_ += px
results[feat, MASS_COL] = mass_
return 0 # Unused
@try_numba_autojit(nopython=True)
def _numba_refine_2D_c(raw_image, image, radiusY, radiusX, coords, N,
max_iterations, shapeY, shapeX, maskY,
maskX, N_mask, r2_mask, cmask, smask, results):
SHIFT_THRESH = 0.6
GOOD_ENOUGH_THRESH = 0.01
# Column indices into the 'results' array
MASS_COL = 2
RG_COL = 3
ECC_COL = 4
SIGNAL_COL = 5
RAW_MASS_COL = 6
upper_boundY = shapeY - radiusY - 1
upper_boundX = shapeX - radiusX - 1
for feat in range(N):
# Define the circular neighborhood of (x, y).
coordY = coords[feat, 0]
coordX = coords[feat, 1]
cm_nY = 0.
cm_nX = 0.
squareY = int(round(coordY)) - radiusY
squareX = int(round(coordX)) - radiusX
mass_ = 0.0
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nY /= mass_
cm_nX /= mass_
cm_iY = cm_nY - radiusY + coordY
cm_iX = cm_nX - radiusX + coordX
allow_moves = True
for iteration in range(max_iterations):
off_centerY = cm_nY - radiusY
off_centerX = cm_nX - radiusX
if (abs(off_centerY) < GOOD_ENOUGH_THRESH and
abs(off_centerX) < GOOD_ENOUGH_THRESH):
break # Go to next feature
# If we're off by more than half a pixel in any direction, move.
do_move = False
if allow_moves and (abs(off_centerY) > SHIFT_THRESH or
abs(off_centerX) > SHIFT_THRESH):
do_move = True
if do_move:
# In here, coord is an integer.
new_coordY = int(round(coordY))
new_coordX = int(round(coordX))
oc = off_centerY
if oc > SHIFT_THRESH:
new_coordY += 1
elif oc < - SHIFT_THRESH:
new_coordY -= 1
oc = off_centerX
if oc > SHIFT_THRESH:
new_coordX += 1
elif oc < - SHIFT_THRESH:
new_coordX -= 1
# Don't move outside the image!
if new_coordY < radiusY:
new_coordY = radiusY
if new_coordX < radiusX:
new_coordX = radiusX
if new_coordY > upper_boundY:
new_coordY = upper_boundY
if new_coordX > upper_boundX:
new_coordX = upper_boundX
# Update slice to shifted position.
squareY = new_coordY - radiusY
squareX = new_coordX - radiusX
cm_nY = 0.
cm_nX = 0.
# If we're off by less than half a pixel, interpolate.
else:
break
# TODO Implement this for numba.
# Remember to zero cm_n somewhere in here.
# Here, coord is a float. We are off the grid.
# neighborhood = ndimage.shift(neighborhood, -off_center,
# order=2, mode='constant', cval=0)
# new_coord = np.float_(coord) + off_center
# Disallow any whole-pixels moves on future iterations.
# allow_moves = False
# cm_n was re-zeroed above in an unrelated loop
mass_ = 0.
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nY /= mass_
cm_nX /= mass_
cm_iY = cm_nY - radiusY + new_coordY
cm_iX = cm_nX - radiusX + new_coordX
coordY = new_coordY
coordX = new_coordX
# matplotlib and ndimage have opposite conventions for xy <-> yx.
results[feat, 0] = cm_iX
results[feat, 1] = cm_iY
# Characterize the neighborhood of our final centroid.
mass_ = 0.
raw_mass_ = 0.
Rg_ = 0.
ecc1 = 0.
ecc2 = 0.
signal_ = 0.
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
mass_ += px
Rg_ += r2_mask[i]*px
ecc1 += cmask[i]*px
ecc2 += smask[i]*px
raw_mass_ += raw_image[squareY + maskY[i],
squareX + maskX[i]]
if px > signal_:
signal_ = px
results[feat, RG_COL] = np.sqrt(Rg_/mass_)
results[feat, MASS_COL] = mass_
center_px = image[squareY + radiusY, squareX + radiusX]
results[feat, ECC_COL] = np.sqrt(ecc1**2 + ecc2**2) / (mass_ - center_px + 1e-6)
results[feat, SIGNAL_COL] = signal_
results[feat, RAW_MASS_COL] = raw_mass_
return 0 # Unused
@try_numba_autojit(nopython=True)
def _numba_refine_2D_c_a(raw_image, image, radiusY, radiusX, coords, N,
max_iterations, shapeY, shapeX, maskY,
maskX, N_mask, y2_mask, x2_mask, cmask, smask,
results):
SHIFT_THRESH = 0.6
GOOD_ENOUGH_THRESH = 0.01
# Column indices into the 'results' array
MASS_COL = 2
RGX_COL = 3
RGY_COL = 4
ECC_COL = 5
SIGNAL_COL = 6
RAW_MASS_COL = 7
upper_boundY = shapeY - radiusY - 1
upper_boundX = shapeX - radiusX - 1
for feat in range(N):
# Define the circular neighborhood of (x, y).
coordY = coords[feat, 0]
coordX = coords[feat, 1]
cm_nY = 0.
cm_nX = 0.
squareY = int(round(coordY)) - radiusY
squareX = int(round(coordX)) - radiusX
mass_ = 0.0
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nY /= mass_
cm_nX /= mass_
cm_iY = cm_nY - radiusY + coordY
cm_iX = cm_nX - radiusX + coordX
allow_moves = True
for iteration in range(max_iterations):
off_centerY = cm_nY - radiusY
off_centerX = cm_nX - radiusX
if (abs(off_centerY) < GOOD_ENOUGH_THRESH and
abs(off_centerX) < GOOD_ENOUGH_THRESH):
break # Go to next feature
# If we're off by more than half a pixel in any direction, move.
do_move = False
if allow_moves and (abs(off_centerY) > SHIFT_THRESH or
abs(off_centerX) > SHIFT_THRESH):
do_move = True
if do_move:
# In here, coord is an integer.
new_coordY = int(round(coordY))
new_coordX = int(round(coordX))
oc = off_centerY
if oc > SHIFT_THRESH:
new_coordY += 1
elif oc < - SHIFT_THRESH:
new_coordY -= 1
oc = off_centerX
if oc > SHIFT_THRESH:
new_coordX += 1
elif oc < - SHIFT_THRESH:
new_coordX -= 1
# Don't move outside the image!
if new_coordY < radiusY:
new_coordY = radiusY
if new_coordX < radiusX:
new_coordX = radiusX
if new_coordY > upper_boundY:
new_coordY = upper_boundY
if new_coordX > upper_boundX:
new_coordX = upper_boundX
# Update slice to shifted position.
squareY = new_coordY - radiusY
squareX = new_coordX - radiusX
cm_nY = 0.
cm_nX = 0.
# If we're off by less than half a pixel, interpolate.
else:
break
# TODO Implement this for numba.
# Remember to zero cm_n somewhere in here.
# Here, coord is a float. We are off the grid.
# neighborhood = ndimage.shift(neighborhood, -off_center,
# order=2, mode='constant', cval=0)
# new_coord = np.float_(coord) + off_center
# Disallow any whole-pixels moves on future iterations.
# allow_moves = False
# cm_n was re-zeroed above in an unrelated loop
mass_ = 0.
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nY /= mass_
cm_nX /= mass_
cm_iY = cm_nY - radiusY + new_coordY
cm_iX = cm_nX - radiusX + new_coordX
coordY = new_coordY
coordX = new_coordX
# matplotlib and ndimage have opposite conventions for xy <-> yx.
results[feat, 0] = cm_iX
results[feat, 1] = cm_iY
# Characterize the neighborhood of our final centroid.
mass_ = 0.
raw_mass_ = 0.
RgY = 0.
RgX = 0.
ecc1 = 0.
ecc2 = 0.
signal_ = 0.
for i in range(N_mask):
px = image[squareY + maskY[i],
squareX + maskX[i]]
mass_ += px
RgY += y2_mask[i]*px
RgX += x2_mask[i]*px
ecc1 += cmask[i]*px
ecc2 += smask[i]*px
raw_mass_ += raw_image[squareY + maskY[i],
squareX + maskX[i]]
if px > signal_:
signal_ = px
results[feat, RGY_COL] = np.sqrt(RgY/mass_)
results[feat, RGX_COL] = np.sqrt(RgX/mass_)
results[feat, MASS_COL] = mass_
center_px = image[squareY + radiusY, squareX + radiusX]
results[feat, ECC_COL] = np.sqrt(ecc1**2 + ecc2**2) / (mass_ - center_px + 1e-6)
results[feat, SIGNAL_COL] = signal_
results[feat, RAW_MASS_COL] = raw_mass_
return 0 # Unused
@try_numba_autojit(nopython=True)
def _numba_refine_3D(raw_image, image, radiusZ, radiusY, radiusX, coords, N,
max_iterations, characterize, shapeZ, shapeY, shapeX,
maskZ, maskY, maskX, N_mask, r2_mask, z2_mask, y2_mask,
x2_mask, results):
SHIFT_THRESH = 0.6
GOOD_ENOUGH_THRESH = 0.01
# Column indices into the 'results' array
MASS_COL = 3
isotropic = (radiusX == radiusY and radiusX == radiusZ)
if isotropic:
RG_COL = 4
ECC_COL = 5
SIGNAL_COL = 6
RAW_MASS_COL = 7
else:
RGX_COL = 4
RGY_COL = 5
RGZ_COL = 6
ECC_COL = 7
SIGNAL_COL = 8
RAW_MASS_COL = 9
upper_boundZ = shapeZ - radiusZ - 1
upper_boundY = shapeY - radiusY - 1
upper_boundX = shapeX - radiusX - 1
for feat in range(N):
# Define the neighborhood of (x, y, z).
coordZ = coords[feat, 0]
coordY = coords[feat, 1]
coordX = coords[feat, 2]
cm_nZ = 0.
cm_nY = 0.
cm_nX = 0.
squareZ = int(round(coordZ)) - radiusZ
squareY = int(round(coordY)) - radiusY
squareX = int(round(coordX)) - radiusX
mass_ = 0.0
for i in range(N_mask):
px = image[squareZ + maskZ[i],
squareY + maskY[i],
squareX + maskX[i]]
cm_nZ += px*maskZ[i]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nZ /= mass_
cm_nY /= mass_
cm_nX /= mass_
cm_iZ = cm_nZ - radiusZ + coordZ
cm_iY = cm_nY - radiusY + coordY
cm_iX = cm_nX - radiusX + coordX
allow_moves = True
for iteration in range(max_iterations):
off_centerZ = cm_nZ - radiusZ
off_centerY = cm_nY - radiusY
off_centerX = cm_nX - radiusX
if (abs(off_centerZ) < GOOD_ENOUGH_THRESH and
abs(off_centerY) < GOOD_ENOUGH_THRESH and
abs(off_centerX) < GOOD_ENOUGH_THRESH):
break # Go to next feature
# If we're off by more than half a pixel in any direction, move.
do_move = False
if allow_moves and (abs(off_centerZ) > SHIFT_THRESH or
abs(off_centerY) > SHIFT_THRESH or
abs(off_centerX) > SHIFT_THRESH):
do_move = True
if do_move:
# In here, coord is an integer.
new_coordZ = int(round(coordZ))
new_coordY = int(round(coordY))
new_coordX = int(round(coordX))
oc = off_centerZ
if oc > SHIFT_THRESH:
new_coordZ += 1
elif oc < - SHIFT_THRESH:
new_coordZ -= 1
oc = off_centerY
if oc > SHIFT_THRESH:
new_coordY += 1
elif oc < - SHIFT_THRESH:
new_coordY -= 1
oc = off_centerX
if oc > SHIFT_THRESH:
new_coordX += 1
elif oc < - SHIFT_THRESH:
new_coordX -= 1
# Don't move outside the image!
if new_coordZ < radiusZ:
new_coordZ = radiusZ
if new_coordY < radiusY:
new_coordY = radiusY
if new_coordX < radiusX:
new_coordX = radiusX
if new_coordZ > upper_boundZ:
new_coordZ = upper_boundZ
if new_coordY > upper_boundY:
new_coordY = upper_boundY
if new_coordX > upper_boundX:
new_coordX = upper_boundX
# Update slice to shifted position.
squareZ = new_coordZ - radiusZ
squareY = new_coordY - radiusY
squareX = new_coordX - radiusX
cm_nZ = 0.
cm_nY = 0.
cm_nX = 0.
# If we're off by less than half a pixel, interpolate.
else:
break
# TODO Implement this for numba.
# Remember to zero cm_n somewhere in here.
# Here, coord is a float. We are off the grid.
# neighborhood = ndimage.shift(neighborhood, -off_center,
# order=2, mode='constant', cval=0)
# new_coord = np.float_(coord) + off_center
# Disallow any whole-pixels moves on future iterations.
# allow_moves = False
# cm_n was re-zeroed above in an unrelated loop
mass_ = 0.
for i in range(N_mask):
px = image[squareZ + maskZ[i],
squareY + maskY[i],
squareX + maskX[i]]
cm_nZ += px*maskZ[i]
cm_nY += px*maskY[i]
cm_nX += px*maskX[i]
mass_ += px
cm_nZ /= mass_
cm_nY /= mass_
cm_nX /= mass_
cm_iZ = cm_nZ - radiusZ + new_coordZ
cm_iY = cm_nY - radiusY + new_coordY
cm_iX = cm_nX - radiusX + new_coordX
coordZ = new_coordZ
coordY = new_coordY
coordX = new_coordX
# matplotlib and ndimage have opposite conventions for xy <-> yx.
results[feat, 0] = cm_iX
results[feat, 1] = cm_iY
results[feat, 2] = cm_iZ
# Characterize the neighborhood of our final centroid.
mass_ = 0.
raw_mass_ = 0.
Rg_ = 0.
RgZ = 0.
RgY = 0.
RgX = 0.
signal_ = 0.
if not characterize:
for i in range(N_mask):
px = image[squareZ + maskZ[i],
squareY + maskY[i],
squareX + maskX[i]]
mass_ += px
elif isotropic:
for i in range(N_mask):
px = image[squareZ + maskZ[i],
squareY + maskY[i],
squareX + maskX[i]]
mass_ += px
Rg_ += r2_mask[i]*px
raw_mass_ += raw_image[squareZ + maskZ[i],
squareY + maskY[i],
squareX + maskX[i]]
if px > signal_:
signal_ = px
results[feat, RG_COL] = np.sqrt(Rg_/mass_)
else:
for i in range(N_mask):
px = image[squareZ + maskZ[i],
squareY + maskY[i],
squareX + maskX[i]]
mass_ += px
                RgZ += z2_mask[i]*px
RgY += y2_mask[i]*px
RgX += x2_mask[i]*px
raw_mass_ += raw_image[squareZ + maskZ[i],
squareY + maskY[i],
squareX + maskX[i]]
if px > signal_:
signal_ = px
results[feat, RGZ_COL] = np.sqrt(RgZ/mass_)
results[feat, RGY_COL] = np.sqrt(RgY/mass_)
results[feat, RGX_COL] = np.sqrt(RgX/mass_)
results[feat, MASS_COL] = mass_
if characterize:
results[feat, SIGNAL_COL] = signal_
results[feat, ECC_COL] = np.nan
results[feat, RAW_MASS_COL] = raw_mass_
return 0 # Unused
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/feature_numba.py",
"copies": "1",
"size": "22424",
"license": "bsd-3-clause",
"hash": -4317465159195806700,
"line_mean": 35.284789644,
"line_max": 88,
"alpha_frac": 0.45932929,
"autogenerated": false,
"ratio": 3.699719518231315,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46590488082313153,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .utils import memo, validate_tuple
__all__ = ['binary_mask', 'r_squared_mask', 'cosmask', 'sinmask',
'theta_mask']
@memo
def binary_mask(radius, ndim):
"Elliptical mask in a rectangular array"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
return sum(r) <= 1
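# Usage sketch (illustrative): a circular mask of radius 2 in two dimensions
# is a 5x5 boolean array covering 13 pixels.
def _example_binary_mask():
    mask = binary_mask(2, 2)
    return mask.shape, mask.sum()   # -> (5, 5), 13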
@memo
def N_binary_mask(radius, ndim):
return np.sum(binary_mask(radius,ndim))
@memo
def r_squared_mask(radius, ndim):
"Mask with values r^2 inside radius and 0 outside"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
r2 = np.sum(coords**2, 0).astype(int)
r2[sum(r) > 1] = 0
return r2
@memo
def x_squared_masks(radius, ndim):
"Returns ndim masks with values x^2 inside radius and 0 outside"
radius = validate_tuple(radius, ndim)
points = [np.arange(-rad, rad + 1) for rad in radius]
if len(radius) > 1:
coords = np.array(np.meshgrid(*points, indexing="ij"))
else:
coords = np.array([points[0]])
r = [(coord/rad)**2 for (coord, rad) in zip(coords, radius)]
masks = np.asarray(coords**2, dtype=int)
masks[:, sum(r) > 1] = 0
return masks
@memo
def theta_mask(radius):
"""Mask of values giving angular position relative to center. The angle is
defined according to ISO standards in which the angle is measured counter-
clockwise from the x axis, measured in a normal coordinate system with y-
axis pointing up and x axis pointing right.
In other words: for increasing angle, the coordinate moves counterclockwise
around the feature center starting on the right side.
However, in most images, the y-axis will point down so that the coordinate
will appear to move clockwise around the feature center.
"""
# 2D only
radius = validate_tuple(radius, 2)
tan_of_coord = lambda y, x: np.arctan2(y - radius[0], x - radius[1])
return np.fromfunction(tan_of_coord, [r * 2 + 1 for r in radius])
@memo
def sinmask(radius):
"Sin of theta_mask"
return np.sin(2*theta_mask(radius))
@memo
def cosmask(radius):
"Sin of theta_mask"
return np.cos(2*theta_mask(radius))
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/masks.py",
"copies": "1",
"size": "2735",
"license": "bsd-3-clause",
"hash": -8286278224110137000,
"line_mean": 30.4367816092,
"line_max": 79,
"alpha_frac": 0.6449725777,
"autogenerated": false,
"ratio": 3.4059775840597757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9537362466192865,
"avg_score": 0.002717539113382288,
"num_lines": 87
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from .utils import validate_tuple
def get_slice(coords, shape, radius):
"""Returns the slice and origin that belong to ``slice_image``"""
# interpret parameters
ndim = len(shape)
radius = validate_tuple(radius, ndim)
coords = np.atleast_2d(np.round(coords).astype(np.int))
# drop features that have no pixels inside the image
in_bounds = np.array([(coords[:, i] >= -r) & (coords[:, i] < sh + r)
for i, sh, r in zip(range(ndim), shape, radius)])
coords = coords[np.all(in_bounds, axis=0)]
# return if no coordinates are left
if len(coords) == 0:
return [slice(None, 0)] * ndim, None
# calculate the box
lower = coords.min(axis=0) - radius
upper = coords.max(axis=0) + radius + 1
# calculate the slices
origin = [None] * ndim
slices = [None] * ndim
for i, sh, low, up in zip(range(ndim), shape, lower, upper):
lower_bound_trunc = max(0, low)
upper_bound_trunc = min(sh, up)
slices[i] = slice(lower_bound_trunc, upper_bound_trunc)
origin[i] = lower_bound_trunc
return slices, origin
def slice_image(pos, image, radius):
""" Slice a box around a group of features from an image.
The box is the smallest box that contains all coordinates up to `radius`
from any coordinate.
Parameters
----------
image : ndarray
The image that will be sliced
pos : iterable
An iterable (e.g. list or ndarray) that contains the feature positions
radius : number or tuple of numbers
Defines the size of the slice. Every pixel that has a distance lower or
equal to `radius` to a feature position is included.
Returns
-------
tuple of:
- the sliced image
- the coordinate of the slice origin (top-left pixel)
"""
slices, origin = get_slice(pos, image.shape, radius)
return image[slices], origin
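# Usage sketch (illustrative): crop the smallest box that contains two
# feature positions plus a 5 px border. Coordinates are arbitrary examples.
def _example_slice_image():
    image = np.zeros((100, 100))
    cropped, origin = slice_image([[20, 30], [25, 35]], image, radius=5)
    return cropped.shape, origin    # box spanning both features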
def get_mask(pos, shape, radius, include_edge=True, return_masks=False):
""" Create a binary mask that masks pixels farther than radius to all
given feature positions.
Optionally returns the masks that recover the individual feature pixels from
a masked image, as follows: ``image[mask][masks_single[i]]``
Parameters
----------
pos : ndarray (N x 2 or N x 3)
Feature positions
shape : tuple
The shape of the image
radius : number or tuple
Radius of the individual feature masks
include_edge : boolean, optional
Determine whether pixels at exactly one radius from a position are
included. Default True.
return_masks : boolean, optional
Also return masks that recover the single features from a masked image.
Default False.
Returns
-------
ndarray containing a binary mask
if return_masks==True, returns a tuple of [masks, masks_singles]
"""
ndim = len(shape)
radius = validate_tuple(radius, ndim)
pos = np.atleast_2d(pos)
if include_edge:
in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) <= 1
for p in pos]
else:
in_mask = [np.sum(((np.indices(shape).T - p) / radius)**2, -1) < 1
for p in pos]
mask_total = np.any(in_mask, axis=0).T
if return_masks:
masks_single = np.empty((len(pos), mask_total.sum()), dtype=np.bool)
for i, _in_mask in enumerate(in_mask):
masks_single[i] = _in_mask.T[mask_total]
return mask_total, masks_single
else:
return mask_total
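# Usage sketch (illustrative): boolean mask that is True within 3 px of
# either of two example positions in a 32x32 image.
def _example_get_mask():
    mask = get_mask([[10, 10], [20, 20]], shape=(32, 32), radius=3)
    return mask.sum()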
| {
"repo_name": "caspervdw/circletracking",
"path": "circletracking/masks.py",
"copies": "1",
"size": "3673",
"license": "bsd-3-clause",
"hash": -8459566437243349000,
"line_mean": 33.980952381,
"line_max": 80,
"alpha_frac": 0.6210182412,
"autogenerated": false,
"ratio": 3.882663847780127,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003682088980127,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from .utils import validate_tuple, guess_pos_columns
from functools import wraps
def is_rgb(image, ndim=2, allow_rgba=True):
shape = image.shape
return len(shape) == ndim + 1 and (shape[-1] == 3 or
(image.shape[-1] == 4 and allow_rgba))
def wrap_imshow(func):
@wraps(func)
def wrapper(*args, **kwargs):
normed = kwargs.pop('normed', True)
if kwargs.get('ax') is None:
kwargs['ax'] = plt.gca()
ax = func(*args, **kwargs)
return adjust_imshow(ax, normed)
return wrapper
def wrap_imshow3d(func):
@wraps(func)
def wrapper(*args, **kwargs):
aspect = kwargs.pop('aspect', 1.)
normed = kwargs.pop('normed', True)
spacing = kwargs.pop('spacing', 0.05)
if kwargs.get('axs') is None:
fig = plt.gcf()
# make square by adjusting height
w, h = fig.get_size_inches()
fig.set_size_inches(w, w)
kwargs['axs'] = fig.add_subplot(221), fig.add_subplot(222), \
fig.add_subplot(223), fig.add_subplot(224)
kwargs['axs'][3].set_visible(False)
axs = func(*args, **kwargs)
return adjust_imshow3d(axs, aspect, spacing, normed)
return wrapper
def invert_ax(ax, which='both', invert=True, auto=None):
"""Inverts the x and/or y axes of an axis object."""
# kwarg auto=None leaves autoscaling unchanged
if which not in ('x', 'y', 'both'):
raise ValueError("Parameter `which` must be one of {'x' | 'y' | 'both'}.")
if which == 'x' or which == 'both':
low, hi = ax.get_xlim()
if invert and hi > low:
ax.set_xlim(hi, low, auto=auto)
if not invert and low > hi:
ax.set_xlim(low, hi, auto=auto)
if which == 'y' or which == 'both':
low, hi = ax.get_ylim()
if invert and hi > low:
ax.set_ylim(hi, low, auto=auto)
if not invert and low > hi:
ax.set_ylim(low, hi, auto=auto)
return ax
from skimage.measure import find_contours
def get_visible_clim(ax):
"""Obtains the sliced image displayed on ax"""
try:
axim = ax.get_images()[0]
except IndexError:
return 0., 1.
sh_y, sh_x = axim.get_size()
ext_x_lo, ext_x_hi, ext_y_lo, ext_y_hi = axim.get_extent()
if ext_y_lo > ext_y_hi:
ext_y_lo, ext_y_hi = ext_y_hi, ext_y_lo
mpp = [(ext_y_hi - ext_y_lo) / sh_y,
(ext_x_hi - ext_x_lo) / sh_x]
origin = [ext_y_lo / mpp[0] + 0.5,
              ext_x_lo / mpp[1] + 0.5]
x_lo, x_hi = sorted(ax.get_xlim())
y_lo, y_hi = sorted(ax.get_ylim())
slice_x = slice(max(int(round(x_lo / mpp[1] + 0.5 - origin[1])), 0),
min(int(round(x_hi / mpp[1] + 0.5 - origin[1])), sh_x))
slice_y = slice(max(int(round(y_lo / mpp[0] + 0.5 - origin[0])), 0),
min(int(round(y_hi / mpp[0] + 0.5 - origin[0])), sh_y))
im = axim.get_array()[slice_y, slice_x]
if im.size == 0:
return 0., 1.
return im.min(), im.max()
def norm_axesimage(ax, vmin, vmax):
try:
axim = ax.get_images()[0]
except IndexError:
return
im = axim.get_array()
if im.ndim == 3: # RGB, custom norm
if vmax - vmin > 0:
            # the masked array may give an underflow error here
with np.errstate(under='ignore'):
axim.set_array((im - vmin) / (vmax - vmin))
axim.set_clim(0, 1) # this is actually ignored for RGB by mpl
else: # use built-in
axim.set_clim(vmin, vmax)
return axim
def adjust_imshow(ax, normed=True):
# disable autoscaling, use tight layout
ax.autoscale(False, 'both', tight=False)
# set aspect ratio
ax.set_aspect('equal', 'box')
# invert axes
invert_ax(ax, 'y', invert=True)
invert_ax(ax, 'x', invert=False)
# position the ticks
ax.xaxis.tick_top()
# hide grid and tickmarks
ax.tick_params(axis='both', which='both', length=0)
ax.grid(False)
# get maximum pixel values
if normed:
norm_axesimage(ax, *get_visible_clim(ax))
return ax
def adjust_imshow3d(axs, aspect=1., spacing=0.05, normed=True):
ax_xy, ax_zy, ax_zx, ax_extra = axs
# disable autoscaling
ax_xy.autoscale(False, 'both', tight=False)
ax_zy.autoscale(False, 'both', tight=False)
ax_zx.autoscale(False, 'both', tight=False)
# set aspect ratio
ax_xy.set_aspect('equal', 'box')
ax_zy.set_aspect(1/aspect, 'box')
ax_zx.set_aspect(aspect, 'box')
# invert axes
invert_ax(ax_xy, 'y', invert=True)
invert_ax(ax_xy, 'x', invert=False)
invert_ax(ax_zy, 'x', invert=False)
# get x, y, z limits
x_lo, x_hi = ax_xy.get_xlim()
y_hi, y_lo = ax_xy.get_ylim()
z_lo, z_hi = ax_zy.get_xlim()
# copy axes limits
ax_zy.set_ylim(y_hi, y_lo)
ax_zx.set_xlim(x_lo, x_hi)
ax_zx.set_ylim(z_hi, z_lo)
# make a gridspec
gs = gridspec.GridSpec(2, 2,
width_ratios=[x_hi - x_lo, aspect * (z_hi - z_lo)],
height_ratios=[y_hi - y_lo, aspect * (z_hi - z_lo)],
wspace=spacing, hspace=spacing)
ax_xy.set_position(gs[0, 0].get_position(ax_xy.figure))
ax_zx.set_position(gs[1, 0].get_position(ax_zx.figure))
ax_zy.set_position(gs[0, 1].get_position(ax_zy.figure))
ax_extra.set_position(gs[1, 1].get_position(ax_extra.figure))
# position and hide the correct ticks
ax_xy.xaxis.tick_top()
ax_xy.xaxis.set_label_position("top")
ax_zy.xaxis.tick_top()
ax_zy.xaxis.set_label_position("top")
plt.setp(ax_xy.get_xticklabels() + ax_xy.get_yticklabels() +
ax_zy.get_xticklabels() + ax_zx.get_yticklabels(),
visible=True)
plt.setp(ax_zy.get_yticklabels() + ax_zx.get_xticklabels(),
visible=False)
# hide grid and tickmarks
for ax in [ax_xy, ax_zx, ax_zy]:
ax.tick_params(axis='both', which='both', length=0)
ax.grid(False)
# get maximum pixel values
if normed:
vmin_xy, vmax_xy = get_visible_clim(ax_xy)
vmin_zy, vmax_zy = get_visible_clim(ax_zy)
vmin_zx, vmax_zx = get_visible_clim(ax_zx)
vmin = min(vmin_xy, vmin_zy, vmin_zx)
vmax = max(vmax_xy, vmax_zy, vmax_zx)
for ax in [ax_xy, ax_zy, ax_zx]:
norm_axesimage(ax, vmin, vmax)
return axs
@wrap_imshow
def imshow(image, ax=None, mpp=1., origin=(0, 0), ax_labels=False, **kwargs):
"""Show an image. Origin is in pixels."""
_imshow_style = dict(origin='lower', interpolation='nearest',
cmap=plt.cm.gray, aspect='equal')
_imshow_style.update(kwargs)
if not is_rgb(image, ndim=2):
try:
from pims import to_rgb
except ImportError:
raise ImportError("Imshow requires PIMS to display a non-RGB image")
image = to_rgb(image, kwargs.pop('colors', None), normed=False) / 255.
shape = image.shape[:2]
mpp = validate_tuple(mpp, ndim=2)
origin = validate_tuple(origin, ndim=2)
# extent is defined on the outer edges of the pixels
# we want the center of the topleft to intersect with the origin
extent = [(origin[1] - 0.5) * mpp[1],
(origin[1] + shape[1] - 0.5) * mpp[1],
(origin[0] - 0.5) * mpp[0],
(origin[0] + shape[0] - 0.5) * mpp[0]]
ax.imshow(image, extent=extent, **_imshow_style)
ax.set_xlim(extent[0], extent[1])
ax.set_ylim(extent[3], extent[2])
if ax_labels:
if mpp == 1.:
fmt = '{} [px]'
elif mpl.rcParams['text.usetex']:
fmt = r'{} [\textmu m]'
else:
fmt = r'{} [\xb5m]'
ax.set_xlabel(fmt.format('x'))
ax.set_ylabel(fmt.format('y'))
return ax
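# Usage sketch (illustrative): show a caller-supplied image with axes labeled
# in microns, assuming 0.1 micron per pixel.
def _example_imshow(image):
    return imshow(image, mpp=0.1, ax_labels=True)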
@wrap_imshow3d
def imshow3d(image3d, mode='max', center=None, mpp=1.,
origin=(0, 0, 0), axs=None, ax_labels=False, **kwargs):
"""Shows the xy, xz, and yz projections of a 3D image.
Parameters
----------
image3d : ndarray
mode : {'max' | 'slice'}
aspect : number
aspect ratio of pixel size z / xy. Default 1.
center : tuple
in pixels
mpp : tuple
microns per pixel
origin : tuple
coordinate of the (center of the) topleft pixel (in pixels)
spacing : number
spacing between images
    axs : tuple of four matplotlib Axes, optional
Returns
-------
fig, (ax_xy, ax_zy, ax_zx, ax_extra)
"""
imshow_style = dict(origin='lower', interpolation='nearest',
cmap=plt.cm.gray, aspect='auto')
imshow_style.update(kwargs)
if not is_rgb(image3d, ndim=3):
try:
from pims import to_rgb
except ImportError:
raise ImportError("Imshow requires PIMS to display a non-RGB image")
image3d = to_rgb(image3d, kwargs.pop('colors', None), normed=False) / 255.
shape = image3d.shape[:3]
mpp = validate_tuple(mpp, ndim=3)
origin = validate_tuple(origin, ndim=3)
ax_xy, ax_zy, ax_zx, ax_extra = axs
if mode == 'max':
image_xy = image3d.max(0)
image_zx = image3d.max(1)
image_zy = image3d.max(2)
elif mode == 'slice':
center_i = [int(round(c - o)) for c, o in zip(center, origin)]
center_i = [min(max(c, 0), sh - 1) for c, sh in zip(center_i, shape)]
image_xy = image3d[center_i[0], :, :]
image_zx = image3d[:, center_i[1], :]
image_zy = image3d[:, :, center_i[2]]
else:
raise ValueError('Unknown mode "{}"'.format(mode))
if image_zy.ndim == 3:
image_zy = np.transpose(image_zy, (1, 0, 2))
else:
image_zy = image_zy.T
# extent is defined on the outer edges of the pixels
# we want the center of the topleft to intersect with the origin
extent = [(origin[2] - 0.5) * mpp[2],
(origin[2] + shape[2] - 0.5) * mpp[2],
(origin[1] - 0.5) * mpp[1],
(origin[1] + shape[1] - 0.5) * mpp[1],
(origin[0] - 0.5) * mpp[0],
(origin[0] + shape[0] - 0.5) * mpp[0]]
extent_xy = extent[:4]
extent_zx = extent[:2] + extent[4:6]
extent_zy = extent[4:6] + extent[2:4]
ax_xy.imshow(image_xy, extent=extent_xy, **imshow_style)
ax_zx.imshow(image_zx, extent=extent_zx, **imshow_style)
ax_zy.imshow(image_zy, extent=extent_zy, **imshow_style)
ax_xy.set_xlim(extent[0], extent[1], auto=False)
ax_xy.set_ylim(extent[3], extent[2], auto=False)
ax_zy.set_xlim(extent[4], extent[5], auto=False)
ax_zy.set_ylim(extent[3], extent[2], auto=False)
ax_zx.set_xlim(extent[0], extent[1], auto=False)
ax_zx.set_ylim(extent[5], extent[4], auto=False)
if ax_labels:
if mpp == 1.:
fmt = '{} [px]'
elif mpl.rcParams['text.usetex']:
fmt = r'{} [\textmu m]'
else:
fmt = r'{} [\xb5m]'
ax_xy.set_xlabel(fmt.format('x'))
ax_xy.set_ylabel(fmt.format('y'))
ax_zy.set_xlabel(fmt.format('z'))
ax_zx.set_ylabel(fmt.format('z'))
return axs
@wrap_imshow
def annotate_ellipse(params, ax=None, crop_radius=1.2, **kwargs):
"""Annotates an ellipse on an image
Parameters
----------
params : tuple or dict
either (yr, xr, yc, xc) tuple
or dict with names ['yr', 'xr', 'yc', 'xc']
"""
from matplotlib.patches import Ellipse
ellipse_style = dict(ec='yellow', fill=False)
ellipse_style.update(kwargs)
if isinstance(params, tuple):
yr, xr, yc, xc = params
else:
yr = params['yr']
xr = params['xr']
yc = params['yc']
xc = params['xc']
ax.add_artist(Ellipse(xy=(xc, yc), width=xr*2, height=yr*2,
**ellipse_style))
# crop image around ellipse
ax.set_xlim(xc - crop_radius * xr, xc + crop_radius * xr)
ax.set_ylim(yc + crop_radius * yr, yc - crop_radius * yr)
return ax
@wrap_imshow3d
def annotate_ellipsoid(params, axs=None, crop_radius=1.2, **kwargs):
"""Annotates an ellipse on an image
Parameters
----------
params : tuple or dict
either (zr, yr, xr, zc, yc, xc) tuple
or dict with names ['zr', 'yr', 'xr', 'zc', 'yc', 'xc']
"""
from matplotlib.patches import Ellipse
ellipse_style = dict(ec='yellow', fill=False)
ellipse_style.update(kwargs)
ax_xy, ax_zy, ax_zx, ax_extra = axs
if isinstance(params, tuple):
zr, yr, xr, zc, yc, xc = params
else:
zr = params['zr']
yr = params['yr']
xr = params['xr']
zc = params['zc']
yc = params['yc']
xc = params['xc']
ax_xy.add_artist(Ellipse(xy=(xc, yc), width=xr*2, height=yr*2,
**ellipse_style))
ax_zy.add_artist(Ellipse(xy=(zc, yc), width=zr*2, height=yr*2,
**ellipse_style))
ax_zx.add_artist(Ellipse(xy=(xc, zc), width=xr*2, height=zr*2,
**ellipse_style))
# crop image around ellipse
ax_xy.set_xlim(xc - crop_radius * xr, xc + crop_radius * xr)
ax_xy.set_ylim(yc - crop_radius * yr, yc + crop_radius * yr)
ax_zy.set_xlim(zc - crop_radius * zr, zc + crop_radius * zr)
return axs
@wrap_imshow3d
def scatter3d(features, mode='all', center=None, mpp=1.,
axs=None, pos_columns=None, slice_thickness=1., **kwargs):
_kwargs = dict(markersize=15, markeredgewidth=2,
markerfacecolor='none', markeredgecolor='r',
marker='o', linestyle='none')
_kwargs.update(kwargs)
mpp = validate_tuple(mpp, ndim=3)
slice_thickness = validate_tuple(slice_thickness, ndim=3)
ax_xy, ax_zy, ax_zx, ax_extra = axs
if pos_columns is None:
pos_columns = guess_pos_columns(features)
coords = features[pos_columns].values * mpp
if mode == 'all':
feat_xy = coords[:, 2], coords[:, 1]
feat_zy = coords[:, 0], coords[:, 1]
feat_zx = coords[:, 2], coords[:, 0]
elif mode == 'slice':
masks = [(coords[:, i] >= center[i] - slice_thickness[i] / 2) &
(coords[:, i] <= center[i] + slice_thickness[i] / 2)
for i in range(3)]
feat_xy = coords[masks[0], 2], coords[masks[0], 1]
feat_zy = coords[masks[2], 0], coords[masks[2], 1]
feat_zx = coords[masks[1], 2], coords[masks[1], 0]
ax_xy.plot(*feat_xy, **_kwargs)
ax_zy.plot(*feat_zy, **_kwargs)
ax_zx.plot(*feat_zx, **_kwargs)
return axs
| {
"repo_name": "caspervdw/circletracking",
"path": "circletracking/plot.py",
"copies": "1",
"size": "14778",
"license": "bsd-3-clause",
"hash": -6016786950707281000,
"line_mean": 33.2083333333,
"line_max": 82,
"alpha_frac": 0.5576532684,
"autogenerated": false,
"ratio": 3.0382401315789473,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40958933999789476,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from scipy.spatial import cKDTree
def msd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=['x', 'y']):
"""Compute the mean displacement and mean squared displacement of one
trajectory over a range of time intervals.
Parameters
----------
traj : DataFrame with one trajectory, including columns frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
detail : See below. Default False.
Returns
-------
DataFrame([<x>, <y>, <x^2>, <y^2>, msd], index=t)
If detail is True, the DataFrame also contains a column N,
the estimated number of statistically independent measurements
that comprise the result at each lagtime.
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
See also
--------
imsd() and emsd()
"""
pos = traj.set_index('frame')[pos_columns]
t = traj['frame']
# Reindex with consecutive frames, placing NaNs in the gaps.
pos = pos.reindex(np.arange(pos.index[0], 1 + pos.index[-1]))
max_lagtime = min(max_lagtime, len(t)) # checking to be safe
lagtimes = 1 + np.arange(max_lagtime)
disp = pd.concat([pos.sub(pos.shift(lt)) for lt in lagtimes],
keys=lagtimes, names=['lagt', 'frames'])
results = mpp*disp.mean(level=0)
results.columns = ['<{}>'.format(p) for p in pos_columns]
results[['<{}^2>'.format(p) for p in pos_columns]] = mpp**2*(disp**2).mean(level=0)
results['msd'] = mpp**2*(disp**2).mean(level=0).sum(1) # <r^2>
# Estimated statistically independent measurements = 2N/t
if detail:
results['N'] = 2*disp.icol(0).count(level=0).div(Series(lagtimes))
results['lagt'] = results.index.values/fps
return results[:-1]
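# Usage sketch (illustrative): MSD of a single synthetic random walk,
# pretending 0.1 micron/px and 24 frames/s. Values are arbitrary examples.
def _example_msd():
    np.random.seed(0)
    steps = np.random.randn(100, 2).cumsum(axis=0)
    traj = DataFrame({'frame': np.arange(100),
                      'x': steps[:, 0], 'y': steps[:, 1]})
    return msd(traj, mpp=0.1, fps=24, max_lagtime=20)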
def imsd(traj, mpp, fps, max_lagtime=100, statistic='msd', pos_columns=['x', 'y']):
"""Compute the mean squared displacement of each particle.
Parameters
----------
traj : DataFrame of trajectories of multiple particles, including
columns particle, frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
statistic : {'msd', '<x>', '<y>', '<x^2>', '<y^2>'}, default is 'msd'
The functions msd() and emsd() return all these as columns. For
imsd() you have to pick one.
Returns
-------
DataFrame([Probe 1 msd, Probe 2 msd, ...], index=t)
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
"""
ids = []
msds = []
# Note: Index is set by msd, so we don't need to worry
# about conformity here.
for pid, ptraj in traj.groupby('particle'):
msds.append(msd(ptraj, mpp, fps, max_lagtime, False, pos_columns))
ids.append(pid)
results = pd.concat(msds, keys=ids)
# Swap MultiIndex levels so that unstack() makes particles into columns.
results = results.swaplevel(0, 1)[statistic].unstack()
lagt = results.index.values.astype('float64')/float(fps)
results.set_index(lagt, inplace=True)
results.index.name = 'lag time [s]'
return results
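# Usage sketch (illustrative): per-particle MSD curves for a caller-supplied
# trajectory table, using made-up calibration values.
def _example_imsd(traj):
    return imsd(traj, mpp=0.1, fps=24, max_lagtime=50)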
def emsd(traj, mpp, fps, max_lagtime=100, detail=False, pos_columns=['x', 'y']):
"""Compute the ensemble mean squared displacements of many particles.
Parameters
----------
traj : DataFrame of trajectories of multiple particles, including
columns particle, frame, x, and y
mpp : microns per pixel
fps : frames per second
max_lagtime : intervals of frames out to which MSD is computed
Default: 100
detail : Set to True to include <x>, <y>, <x^2>, <y^2>. Returns
only <r^2> by default.
Returns
-------
Series[msd, index=t] or, if detail=True,
DataFrame([<x>, <y>, <x^2>, <y^2>, msd], index=t)
Notes
-----
Input units are pixels and frames. Output units are microns and seconds.
"""
ids = []
msds = []
for pid, ptraj in traj.reset_index(drop=True).groupby('particle'):
msds.append(msd(ptraj, mpp, fps, max_lagtime, True, pos_columns))
ids.append(pid)
msds = pd.concat(msds, keys=ids, names=['particle', 'frame'])
results = msds.mul(msds['N'], axis=0).mean(level=1) # weighted average
results = results.div(msds['N'].mean(level=1), axis=0) # weights normalized
# Above, lagt is lumped in with the rest for simplicity and speed.
# Here, rebuild it from the frame index.
if not detail:
return results.set_index('lagt')['msd']
return results
def compute_drift(traj, smoothing=0, pos_columns=['x', 'y']):
"""Return the ensemble drift, x(t).
Parameters
----------
traj : DataFrame of trajectories, including columns x, y, frame, and particle
smoothing : integer
Smooth the drift using a forward-looking rolling mean over
this many frames.
Returns
-------
drift : DataFrame([x, y], index=frame)
Examples
--------
    compute_drift(traj).plot() # not smoothed (smoothing=0 is the default)
compute_drift(traj, 15).plot() # Try various smoothing values.
drift = compute_drift(traj, 15) # Save good drift curves.
corrected_traj = subtract_drift(traj, drift) # Apply them.
"""
# Probe by particle, take the difference between frames.
delta = pd.concat([t.set_index('frame', drop=False).diff()
for p, t in traj.groupby('particle')])
# Keep only deltas between frames that are consecutive.
delta = delta[delta['frame'] == 1]
# Restore the original frame column (replacing delta frame).
del delta['frame']
delta.reset_index(inplace=True)
dx = delta.groupby('frame').mean()
if smoothing > 0:
dx = pd.rolling_mean(dx, smoothing, min_periods=0)
x = dx.cumsum(0)[pos_columns]
return x
def subtract_drift(traj, drift=None):
"""Return a copy of particle trajectores with the overall drift subtracted out.
Parameters
----------
traj : DataFrame of trajectories, including columns x, y, and frame
drift : optional DataFrame([x, y], index=frame) like output of
compute_drift(). If no drift is passed, drift is computed from traj.
Returns
-------
traj : a copy, having modified columns x and y
"""
if drift is None:
drift = compute_drift(traj)
return traj.set_index('frame', drop=False).sub(drift, fill_value=0)
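# Usage sketch (illustrative): estimate the collective drift with a 15-frame
# smoothing window and subtract it from a caller-supplied trajectory table.
def _example_remove_drift(traj):
    drift = compute_drift(traj, smoothing=15)
    return subtract_drift(traj, drift)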
def is_typical(msds, frame, lower=0.1, upper=0.9):
"""Identify which paritcles' MSDs are in the central quantile.
Parameters
----------
msds : DataFrame
This should be organized like the output of imsd().
Columns correspond to particles, indexed by lagtime in frames.
frame : integer
Compare MSDs at this lag interval.
lower : float between 0 and 1, default 0.1
Probes with MSD up to this quantile are deemed outliers.
upper : float between 0 and 1, default 0.9
Probes with MSD above this quantile are deemed outliers.
Returns
-------
Series of boolean values, indexed by particle number
True = typical particle, False = outlier particle
Examples
--------
m = tp.imsd(traj, MPP, FPS)
# Index by particle ID, slice using boolean output from is_typical(), and then
# restore the original index, frame number.
typical_traj = traj.set_index('particle').ix[is_typical(m)].reset_index()\
.set_index('frame', drop=False)
"""
a, b = msds.iloc[frame].quantile(lower), msds.iloc[frame].quantile(upper)
return (msds.iloc[frame] > a) & (msds.iloc[frame] < b)
def vanhove(pos, lagtime, mpp=1, ensemble=False, bins=24):
"""Compute the van Hove correlation (histogram of displacements).
The van Hove correlation function is simply a histogram of particle
displacements. It is useful for detecting physical heterogeneity
(or tracking errors).
Parameters
----------
pos : DataFrame
        x or y positions (one coordinate or the other, not both); one
        column per particle, indexed by frame
lagtime : integer interval of frames
Compare the correlation function at this lagtime.
    mpp : microns per pixel, defaults to 1
        It is usually fine to use pixels for this analysis.
    ensemble : boolean, defaults to False
bins : integer or sequence
        Specify a number of equally spaced bins, or explicitly specify a
sequence of bin edges. See np.histogram docs.
Returns
-------
vh : DataFrame or Series
        If ensemble=True, a Series with the van Hove correlation function of
        the whole ensemble, indexed by displacement. If ensemble=False, a
        DataFrame with each particle's van Hove correlation function.
Examples
--------
pos = traj.set_index(['frame', 'particle'])['x'].unstack() # particles as columns
    vh = vanhove(pos, lagtime=5)
"""
# Reindex with consecutive frames, placing NaNs in the gaps.
pos = pos.reindex(np.arange(pos.index[0], 1 + pos.index[-1]))
assert lagtime <= pos.index.values.max(), \
"There is a no data out to frame %s. " % pos.index.values.max()
disp = mpp*pos.sub(pos.shift(lagtime))
# Let np.histogram choose the best bins for all the data together.
values = disp.values.flatten()
values = values[np.isfinite(values)]
global_bins = np.histogram(values, bins=bins)[1]
# Use those bins to histogram each column by itself.
vh = disp.apply(
lambda x: Series(np.histogram(x, bins=global_bins, density=True)[0]))
vh.index = global_bins[:-1]
if ensemble:
return vh.sum(1)/len(vh.columns)
else:
return vh
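# An illustrative, hypothetical usage sketch (not part of the original
# module): one way to build the `pos` argument for vanhove() from a
# trajectory DataFrame with the usual trackpy columns (frame, particle, x).
# The lag time of 5 frames is an arbitrary assumption.
def _example_vanhove(traj, lagtime=5):
    """Return the ensemble van Hove correlation at the given lag."""
    pos = traj.set_index(['frame', 'particle'])['x'].unstack()
    return vanhove(pos, lagtime, ensemble=True)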
def diagonal_size(single_trajectory, pos_columns=None, t_column='frame'):
"""Measure the diagonal size of a trajectory.
Parameters
----------
single_trajectory : DataFrame containing a single trajectory
    pos_columns : list
        names of the position columns, default ['x', 'y']
    t_column : string, default 'frame'
Returns
-------
    float : length of diagonal of rectangular box containing the trajectory
Examples
--------
>>> diagonal_size(single_trajectory)
>>> many_trajectories.groupby('particle').agg(tp.diagonal_size)
>>> many_trajectories.groupby('particle').filter(lambda x: tp.diagonal_size(x) > 5)
"""
if pos_columns is None:
pos_columns = ['x', 'y']
pos = single_trajectory.set_index(t_column)[pos_columns]
return np.sqrt(np.sum(pos.apply(np.ptp)**2))
def is_localized(traj, threshold=0.4):
raise NotImplementedError("This function has been removed.")
def is_diffusive(traj, threshold=0.9):
raise NotImplementedError("This function has been removed.")
def relate_frames(t, frame1, frame2, pos_columns=None):
"""Find the displacement vector of all particles between two frames.
Parameters
----------
t : DataFrame
trajectories
    pos_columns : list
        names of the position columns, default ['x', 'y']
frame1 : integer
frame2 : integer
Returns
-------
DataFrame
indexed by particle, containing:
x, y, etc. (corresponding to frame1)
x_b, y_b, etc. (corresponding to frame2)
dx, dy, etc.
dr
direction (only if pos_columns=['x', 'y'])
"""
if pos_columns is None:
pos_columns = ['x', 'y']
a = t[t.frame == frame1]
b = t[t.frame == frame2]
j = a.set_index('particle')[pos_columns].join(
b.set_index('particle')[pos_columns], rsuffix='_b')
for pos in pos_columns:
j['d' + pos] = j[pos + '_b'] - j[pos]
j['dr'] = np.sqrt(np.sum([j['d' + pos]**2 for pos in pos_columns], 0))
if pos_columns == ['x', 'y']:
j['direction'] = np.arctan2(j.dy, j.dx)
return j
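# An illustrative, hypothetical usage sketch (not part of the original
# module): summarizing the output of relate_frames(). The frame numbers are
# arbitrary assumptions, and the 'dr' column assumes the default
# pos_columns=['x', 'y'].
def _example_relate_frames(t, frame1=0, frame2=1):
    """Return the mean displacement magnitude between two frames."""
    j = relate_frames(t, frame1, frame2)
    return j['dr'].mean()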
def direction_corr(t, frame1, frame2):
"""Compute the cosine between every pair of particles' displacements.
Parameters
----------
t : DataFrame
trajectories, containing columns particle, frame, x, and y
frame1 : frame number
frame2 : frame number
Returns
-------
    DataFrame with one row per pair of particles, containing the pair
    separation r and the cosine of the angle between their displacements
"""
j = relate_frames(t, frame1, frame2)
cosine = np.cos(np.subtract.outer(j.direction, j.direction))
r = np.sqrt(np.subtract.outer(j.x, j.x)**2 +
np.subtract.outer(j.y, j.y)**2)
upper_triangle = np.triu_indices_from(r, 1)
result = DataFrame({'r': r[upper_triangle],
'cos': cosine[upper_triangle]})
return result
def velocity_corr(t, frame1, frame2):
"""Compute the velocity correlation between
every pair of particles' displacements.
Parameters
----------
t : DataFrame
trajectories, containing columns particle, frame, x, and y
frame1 : frame number
frame2 : frame number
Returns
-------
    DataFrame with one row per pair of particles, containing the pair
    separation r and the dot product of their displacements
"""
j = relate_frames(t, frame1, frame2)
cosine = np.cos(np.subtract.outer(j.direction, j.direction))
r = np.sqrt(np.subtract.outer(j.x, j.x)**2 +
np.subtract.outer(j.y, j.y)**2)
dot_product = cosine*np.abs(np.multiply.outer(j.dr, j.dr))
upper_triangle = np.triu_indices_from(r, 1)
result = DataFrame({'r': r[upper_triangle],
'dot_product': dot_product[upper_triangle]})
return result
def theta_entropy(pos, bins=24, plot=True):
"""Plot the distrbution of directions and return its Shannon entropy.
Parameters
----------
pos : DataFrame with columns x and y, indexed by frame
bins : number of equally-spaced bins in distribution. Default 24.
    plot : plot direction histogram if True
Returns
-------
float : Shannon entropy
Examples
--------
>>> theta_entropy(t[t['particle'] == 3].set_index('frame'))
>>> S = t.set_index('frame').groupby('particle').apply(tp.theta_entropy)
"""
disp = pos - pos.shift(1)
direction = np.arctan2(disp['y'], disp['x'])
bins = np.linspace(-np.pi, np.pi, bins + 1)
if plot:
Series(direction).hist(bins=bins)
return shannon_entropy(direction.dropna(), bins)
def shannon_entropy(x, bins):
"""Compute the Shannon entropy of the distribution of x."""
hist = np.histogram(x, bins)[0]
    hist = hist.astype('float64')/hist.sum() # normalize probability dist.
entropy = -np.sum(np.nan_to_num(hist*np.log(hist)))
return entropy
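# An illustrative, hypothetical check (not part of the original module): a
# sample spread evenly over 24 angular bins has entropy ln(24), while a
# sample concentrated in one bin has entropy 0. The bin edges mirror the
# assumption made in theta_entropy() above.
def _example_shannon_entropy():
    bins = np.linspace(-np.pi, np.pi, 25)  # 24 equal bins
    uniform = np.linspace(-np.pi, np.pi, 24, endpoint=False) + np.pi/24
    peaked = np.zeros(100)  # all observations land in a single bin
    return shannon_entropy(uniform, bins), shannon_entropy(peaked, bins)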
def min_rolling_theta_entropy(pos, window=24, bins=24):
"""Compute the minimum Shannon entropy in any window.
Parameters
----------
pos : DataFrame with columns x and y, indexed by frame
window : number of observations per window
bins : number of equally-spaced bins in distribution. Default 24.
Returns
-------
    float : minimum Shannon entropy over any single window
Examples
--------
    >>> min_rolling_theta_entropy(t[t['particle'] == 3].set_index('frame'))
>>> S = t.set_index('frame').groupby('particle').apply(
... tp.min_rolling_theta_entropy)
"""
disp = pos - pos.shift(1)
direction = np.arctan2(disp['y'], disp['x'])
bins = np.linspace(-np.pi, np.pi, bins + 1)
f = lambda x: shannon_entropy(x, bins)
return pd.rolling_apply(direction.dropna(), window, f).min()
def proximity(features, pos_columns=None):
"""Find the distance to each feature's nearest neighbor.
Parameters
----------
features : DataFrame
pos_columns : list of column names
['x', 'y'] by default
Returns
-------
proximity : DataFrame
distance to each particle's nearest neighbor,
indexed by particle if 'particle' column is present in input
Examples
--------
Find the proximity of each particle to its nearest neighbor in every frame.
>>> prox = t.groupby('frame').apply(proximity).reset_index()
>>> avg_prox = prox.groupby('particle')['proximity'].mean()
And filter the trajectories...
>>> particle_nos = avg_prox[avg_prox > 20].index
>>> t_filtered = t[t['particle'].isin(particle_nos)]
"""
if pos_columns is None:
pos_columns = ['x', 'y']
leaf_size = max(1, int(np.round(np.log10(len(features)))))
tree = cKDTree(features[pos_columns].copy(), leaf_size)
proximity = tree.query(tree.data, 2)[0][:, 1]
result = DataFrame({'proximity': proximity})
if 'particle' in features:
result.set_index(features['particle'], inplace=True)
return result
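# An illustrative, hypothetical usage sketch (not part of the original
# module): proximity() on random positions without a 'particle' column. The
# number of points and the box size are arbitrary assumptions.
def _example_proximity():
    """Return the smallest nearest-neighbor distance among random points."""
    features = DataFrame(np.random.uniform(0, 100, (50, 2)),
                         columns=['x', 'y'])
    return proximity(features)['proximity'].min()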
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/motion.py",
"copies": "1",
"size": "16740",
"license": "bsd-3-clause",
"hash": 629275954714549200,
"line_mean": 32.75,
"line_max": 87,
"alpha_frac": 0.6287335723,
"autogenerated": false,
"ratio": 3.628874918707999,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47576084910079997,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from abc import ABCMeta, abstractmethod, abstractproperty
import warnings
import pandas as pd
from .utils import print_update
class FramewiseData(object):
"Abstract base class defining a data container with framewise access."
__metaclass__ = ABCMeta
@abstractmethod
def put(self, df):
pass
@abstractmethod
def get(self, frame_no):
pass
@abstractproperty
def frames(self):
pass
@abstractmethod
def close(self):
pass
@abstractproperty
def t_column(self):
pass
def __getitem__(self, frame_no):
return self.get(frame_no)
def __len__(self):
return len(self.frames)
def dump(self, N=None):
"""Return data from all, or the first N, frames in a single DataFrame
Parameters
----------
N : integer
optional; if None, return all frames
Returns
-------
DataFrame
"""
if N is None:
return pd.concat(iter(self))
else:
i = iter(self)
return pd.concat((next(i) for _ in range(N)))
@property
def max_frame(self):
return max(self.frames)
def _validate(self, df):
if self.t_column not in df.columns:
raise ValueError("Cannot write frame without a column "
"called {0}".format(self.t_column))
if df[self.t_column].nunique() != 1:
raise ValueError("Found multiple values for 'frame'. "
"Write one frame at a time.")
def __iter__(self):
return self._build_generator()
def _build_generator(self):
for frame_no in self.frames:
yield self.get(frame_no)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
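# An illustrative, hypothetical sketch (not part of this module): the
# smallest concrete FramewiseData, keeping frames in an in-memory dict. It
# only demonstrates which abstract members a subclass must provide; it is
# not a real storage backend.
class _DictStore(FramewiseData):
    "Hypothetical in-memory store, for illustration only."
    def __init__(self, t_column='frame'):
        self._t_column = t_column
        self._data = {}
    @property
    def t_column(self):
        return self._t_column
    @property
    def frames(self):
        return sorted(self._data)
    def put(self, df):
        self._validate(df)
        self._data[df[self.t_column].iloc[0]] = df
    def get(self, frame_no):
        return self._data[frame_no]
    def close(self):
        self._data.clear()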
KEY_PREFIX = 'Frame_'
len_key_prefix = len(KEY_PREFIX)
def code_key(frame_no):
"Turn the frame_no into a 'natural name' string idiomatic of HDFStore"
key = '{0}{1}'.format(KEY_PREFIX, frame_no)
return key
def decode_key(key):
frame_no = int(key[len_key_prefix:])
return frame_no
class PandasHDFStore(FramewiseData):
"""An interface to an HDF5 file with framewise access, using pandas.
Save each frame's data to a node in a pandas HDFStore.
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, mode='a', t_column='frame', **kwargs):
self.filename = os.path.abspath(filename)
self._t_column = t_column
self.store = pd.HDFStore(self.filename, mode, **kwargs)
@property
def t_column(self):
return self._t_column
@property
def max_frame(self):
return max(self.frames)
def put(self, df):
if len(df) == 0:
warnings.warn('An empty DataFrame was passed to put(). Continuing.')
return
frame_no = df[self.t_column].values[0] # validated to be all the same
key = code_key(frame_no)
# Store data as tabular instead of fixed-format.
        # Make sure to remove any preexisting data, so we don't really 'append'.
try:
self.store.remove(key)
except KeyError:
pass
self.store.put(key, df, format='table')
def get(self, frame_no):
key = code_key(frame_no)
frame = self.store.get(key)
return frame
@property
def frames(self):
"""Returns sorted list of integer frame numbers in file"""
return self._get_frame_nos()
def _get_frame_nos(self):
"""Returns sorted list of integer frame numbers in file"""
# Pandas' store.keys() scans the entire file looking for stored Pandas
# structures. This is very slow for large numbers of frames.
# Instead, scan the root level of the file for nodes with names
# matching our scheme; we know they are DataFrames.
r = [decode_key(key) for key in self.store.root._v_children.keys() if
key.startswith(KEY_PREFIX)]
r.sort()
return r
def close(self):
self.store.close()
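# An illustrative, hypothetical usage sketch (not part of this module):
# round-tripping framewise data through a PandasHDFStore. The filename is an
# assumption; each DataFrame must carry a 'frame' column, as put() enforces.
def _example_hdfstore_roundtrip(features_by_frame,
                                filename='example_features.h5'):
    """Write per-frame DataFrames to HDF5, then read them all back."""
    with PandasHDFStore(filename) as s:
        for df in features_by_frame:
            s.put(df)
        return s.dump()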
class PandasHDFStoreBig(PandasHDFStore):
"""Like PandasHDFStore, but keeps a cache of frame numbers.
This can give a large performance boost when a file contains thousands
of frames.
If a file was made in PandasHDFStore, opening it with this class
and then closing it will add a cache (if mode != 'r').
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, mode='a', t_column='frame', **kwargs):
self._CACHE_NAME = '_Frames_Cache'
self._frames_cache = None
self._cache_dirty = False # Whether _frames_cache needs to be written out
super(PandasHDFStoreBig, self).__init__(filename, mode, t_column,
**kwargs)
@property
def frames(self):
# Hit memory cache, then disk cache
if self._frames_cache is not None:
return self._frames_cache
else:
try:
self._frames_cache = list(self.store[self._CACHE_NAME].index.values)
self._cache_dirty = False
except KeyError:
self._frames_cache = self._get_frame_nos()
self._cache_dirty = True # In memory, but not in file
return self._frames_cache
def put(self, df):
self._invalidate_cache()
super(PandasHDFStoreBig, self).put(df)
def rebuild_cache(self):
"""Delete cache on disk and rebuild it."""
self._invalidate_cache()
_ = self.frames # Compute cache
self._flush_cache()
def _invalidate_cache(self):
self._frames_cache = None
try:
del self.store[self._CACHE_NAME]
except KeyError: pass
def _flush_cache(self):
"""Writes frame cache if dirty and file is writable."""
if (self._frames_cache is not None and self._cache_dirty
and self.store.root._v_file._iswritable()):
self.store[self._CACHE_NAME] = pd.DataFrame({'dummy': 1},
index=self._frames_cache)
self._cache_dirty = False
def close(self):
"""Updates cache, writes if necessary, then closes file."""
if self.store.root._v_file._iswritable():
_ = self.frames # Compute cache
self._flush_cache()
super(PandasHDFStoreBig, self).close()
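# An illustrative, hypothetical usage sketch (not part of this module):
# opening an existing file with PandasHDFStoreBig and rebuilding its
# frame-number cache. The filename is an assumption.
def _example_rebuild_cache(filename='example_features.h5'):
    with PandasHDFStoreBig(filename) as s:
        s.rebuild_cache()
        return len(s.frames)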
class PandasHDFStoreSingleNode(FramewiseData):
"""An interface to an HDF5 file with framewise access,
using pandas, that is faster for cross-frame queries.
This implementation is more complex than PandasHDFStore,
but it simplifies (speeds up?) cross-frame queries,
like queries for a single probe's entire trajectory.
Any additional keyword arguments to the constructor are passed to
pandas.HDFStore().
"""
def __init__(self, filename, key='FrameData', mode='a', t_column='frame',
use_tabular_copy=False, **kwargs):
self.filename = os.path.abspath(filename)
self.key = key
self._t_column = t_column
self.store = pd.HDFStore(self.filename, mode, **kwargs)
with pd.get_store(self.filename) as store:
try:
store[self.key]
except KeyError:
pass
else:
self._validate_node(use_tabular_copy)
@property
def t_column(self):
return self._t_column
def put(self, df):
if len(df) == 0:
warnings.warn('An empty DataFrame was passed to put(). Continuing.')
return
self._validate(df)
self.store.append(self.key, df, data_columns=True)
def get(self, frame_no):
frame = self.store.select(self.key, '{0} == {1}'.format(
self._t_column, frame_no))
return frame
def dump(self, N=None):
"""Return data from all, or the first N, frames in a single DataFrame
Parameters
----------
N : integer
optional; if None, return all frames
Returns
-------
DataFrame
"""
if N is None:
return self.store.select(self.key)
else:
Nth_frame = self.frames[N - 1]
return self.store.select(self.key, '{0} <= {1}'.format(
self._t_column, Nth_frame))
def close(self):
self.store.close()
def __del__(self):
if hasattr(self, 'store'):
self.close()
@property
def frames(self):
"""Returns sorted list of integer frame numbers in file"""
# I assume one column can fit in memory, which is not ideal.
# Chunking does not seem to be implemented for select_column.
frame_nos = self.store.select_column(self.key, self.t_column).unique()
frame_nos.sort()
return frame_nos
def _validate_node(self, use_tabular_copy):
# The HDFStore might be non-tabular, which means we cannot select a
# subset, and this whole structure will not work.
# For convenience, this can rewrite the table into a tabular node.
if use_tabular_copy:
            self.key = _make_tabular_copy(self.store, self.key)
pandas_type = getattr(getattr(getattr(
self.store._handle.root, self.key, None), '_v_attrs', None),
'pandas_type', None)
if not pandas_type == 'frame_table':
raise ValueError("This node is not tabular. Call with "
"use_tabular_copy=True to proceed.")
def _make_tabular_copy(store, key):
"""Copy the contents nontabular node in a pandas HDFStore
into a tabular node"""
tabular_key = key + '/tabular'
print_update("Making a tabular copy of %s at %s" % (key, tabular_key))
store.append(tabular_key, store.get(key), data_columns=True)
return tabular_key
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/framewise_data.py",
"copies": "1",
"size": "10185",
"license": "bsd-3-clause",
"hash": -527852623594377700,
"line_mean": 30.0518292683,
"line_max": 84,
"alpha_frac": 0.5862542955,
"autogenerated": false,
"ratio": 4.064245810055866,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5150500105555865,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys
import datetime
from .. import QtCore, QtGui
from xray_vision.qt_widgets.displaydict import RecursiveTreeWidget
from collections import defaultdict
from .control_widgets import DateTimeBox, ComboBox, CheckBox, LineEdit
import traceback
import logging
logger = logging.getLogger(__name__)
_defaults = {
"empty_search": {
"No search results": None
},
"add_btn_text": "Add",
"input_box_type": LineEdit,
"has_check_box": True,
}
class QueryMainWindow(QtGui.QMainWindow):
"""
    Main window that combines a query-input dock widget with a results tree.
"""
# dict1 : search query
# dict2 : unique search id
# dict3 : run_header dict
add_btn_sig = QtCore.Signal(dict, dict, dict)
# dict :
search_btn_sig = QtCore.Signal(dict)
def __init__(self, keys, key_descriptions=None, parent=None,
search_func=None, add_func=None, add_btn_text=None,
unique_id_func=None):
"""
init docstring
Parameters
----------
keys : list
List of keys to use as search terms
key_descriptions : list
List of key descriptions which are used as the tool tips for the
search key labels
parent : QWidget
Parent widget that knows about this one
search_func : function
Executes when the "search" button is pressed. search_func must take
a dictionary as input
add_btn_text : str
Label for the add button
"""
QtGui.QMainWindow.__init__(self, parent)
self.setWindowTitle('Query example')
self._query_controller = QueryController(
keys=keys)
dock = QtGui.QDockWidget()
dock.setWidget(self._query_controller._query_input)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
self.setCentralWidget(self._query_controller._results_tree)
# connect the widget signals to the main window signals
self._query_controller.add_btn_sig.connect(self.add_btn_sig)
self._query_controller.search_btn_sig.connect(self.search_btn_sig)
# connect the search button to the main window search function
self.search_btn_sig.connect(self.search)
# connect the add button to the main window add function
self.add_btn_sig.connect(self.add)
# set the defaults
# register the functions
self.register_search_function(search_func)
self.register_add_function(add_func)
self.register_unique_id_gen_func(unique_id_func)
def register_search_function(self, search_func):
"""
Function that sets the behavior on clicking the 'search' button
Parameters
----------
func : Function
This function must take a dictionary parameter as input with the
following signature: some_search_function(search_dict)
"""
self._search_func = search_func
search_btn_enabled = True
if self._search_func is None:
search_btn_enabled = False
self._query_controller.enable_search_btn(is_enabled=search_btn_enabled)
def register_add_function(self, add_func):
"""
Function that sets the behavior on clicking the 'add' button
Parameters
----------
func : Function
function that executes when the 'add' button is clicked. This
function must have the signature;
some_add_function(query_dict, unique_id_dict, result_dict,
path_to_node_list)
where path_to_node_list is a series of keys that uniquely identify
the currently selected node in the add widget when iterated over.
Examples
--------
the following code will result in "node" being the currently selected
node in the tree widget
>>> node = result_dict
>>> for key in path_to_node_list:
>>> node = node[key]
"""
self._add_func = add_func
add_btn_enabled = True
if self._add_func is None:
add_btn_enabled = False
self._query_controller.enable_add_btn(is_enabled=add_btn_enabled)
def register_unique_id_gen_func(self, unique_id_func):
"""
Parameters
----------
unique_id_func : function
Function that generates a unique ID for a results dictionary. For
now, this function should probably just pick out the header_id
"""
self._query_controller.register_unique_id_gen_func(unique_id_func)
self._unique_id_func = unique_id_func
@QtCore.Slot(list)
def update_search_results(self, results):
"""
Pass through function to update the search results in the
results widget
Parameters
----------
results : array, list, object
"""
self._query_controller.update_search_results(results)
@QtCore.Slot(dict)
def search(self, a_dict):
"""
This function gets called when the search button is clicked
"""
logger.debug("search() function in QueryMainWindow")
return_val = self._search_func(a_dict)
self.update_search_results(return_val)
@QtCore.Slot(dict, dict, dict, list)
def add(self, search_query_dict, unique_id_dict, result_dict):
"""
This function gets called when the add button is clicked
"""
logger.debug("add() function in QueryMainWindow")
logger.debug("search_query_dict: {0}".format(search_query_dict))
logger.debug("unique_id_dict: {0}".format(unique_id_dict))
logger.debug("result_dict: {0}".format(result_dict))
self._add_func(search_query_dict, unique_id_dict, result_dict)
def update_query_keys(self, query_keys, query_key_descriptions):
"""
Simple pass-through function to update the query keys
"""
self._query_controller.update_query_keys(
query_keys=query_keys,
query_key_descriptions=query_key_descriptions
)
class QueryController(QtCore.QObject):
"""
The QueryController is a QObject that contains the search widget which is a
QDockWidget and the tree widget which is a QTreeWidget
Attributes
----------
_keys : list
List of search keys that will be displayed in the _query_input widget
_key_descriptions : list
List of descriptions for the keys that will appear as a tool tip on
mouse hover
_query_input : QtGui.QWidget
The widget that displays a series of text input boxes with a 'search'
button
_results_tree : xray_vision.qt_widgets.displaydict.RecursiveTreeWidget
The widget that displays the results as a tree with an 'add' button
_search_dict : dict
Dictionary that was unpacked into the search function. This attribute
gets stored every time the 'search' button gets clicked
_search_results : list
List of dictionaries that the search function returns
Methods
-------
update_search_results(results_list)
Populate the RecursiveTreeWidget with the results_list
enable_add_btn(bool)
Enable/disable the add button
enable_search_btn(bool)
Enable/disable the search button
add()
Function that executes when the 'add' button is clicked
search()
Function that executes when the 'search' button is clicked
read_search_boxes()
Read the text from the search boxes to form a search dictionary, stored
as _search_dict
update_query_keys(keys, key_descriptions=None)
Remake the query widget with new query keys and key_descriptions
"""
# external handles for the add button and search button
add_btn_sig = QtCore.Signal(dict, dict, dict, list)
search_btn_sig = QtCore.Signal(dict)
###################################################################
# Construction time behavior #
###################################################################
def __init__(self, keys, add_btn_text="Add", *args, **kwargs):
"""
Parameters
----------
keys : dict
keys = {
"key1" : {
"description" : "this is what key1 is for",
"type" : "this is the type of key1",
}
}
add_btn_text : str
Label for the add button
"""
# call up the inheritance chain
super(QueryController, self).__init__(*args, **kwargs)
self._keys = keys
# set up the query widget
self._query_input = self.construct_query()
# set up the results widget
self._results_tree = self.construct_results(add_btn_text)
self._search_dict = _defaults["empty_search"]
self.update_search_results(self._search_dict)
def construct_query(self):
"""
Construct the query widget
Returns
-------
QtGui.QGroupBox
group box that contains the query widget
"""
# declare the group box
query = QtGui.QGroupBox(title="Query")
# declare the search button
self._search_btn = QtGui.QPushButton(text="&Search")
# connect the search buttons clicked signal to the method which parses
# the text boxes to create a search dictionary that gets emitted by the
# externally facing search_btn_sig QtCore.Signal
self._search_btn.clicked.connect(self.search)
# declare the query widget
query_widg = self.construct_query_input()
# declare the layout as a vertical box layout
layout = QtGui.QVBoxLayout()
# add the widgets to the layout
layout.addWidget(query_widg)
layout.addWidget(self._search_btn)
# set the layout of the group box
query.setLayout(layout)
# return the widget
return query
def construct_results(self, add_btn_text):
"""
Construct the results widget
Returns
-------
QtGui.QGroupBox
group box that contains the results widget along with the 'add'
button
"""
# declare a group box
_results = QtGui.QGroupBox(title="Results")
# declare the layout as a vertical box
layout = QtGui.QVBoxLayout()
# declare the tree widget
self._tree = RecursiveTreeWidget()
# declare the "add to canvas" button
self._add_btn = QtGui.QPushButton(text=add_btn_text)
# connect the add button clicked signal to the externally facing
# "add_btn_signal" QtCore.SIGNAL
self._add_btn.clicked.connect(self.add)
# add the tree widget to the layout
layout.addWidget(self._tree)
# add the button to the layout
layout.addWidget(self._add_btn)
# set the layout of the group box
_results.setLayout(layout)
# return the results group box
return _results
def construct_query_input(self, keys=None):
"""
Construct the input boxes for the query.
Parameters
        ----------
keys : dict
keys = {
"key1" : {
"description" : "this is what key1 is for",
"type" : "this is the type of key1",
}
}
Returns
-------
QWidget
This is the widget that contains the search keys as labels and
their input boxes typed on "type"
"""
# default behavior of keys input parameter
if keys is None:
keys = self._keys
self._keys = keys
# declare a vertical layout
vert_layout = QtGui.QVBoxLayout()
try:
# if the input boxes dictionary exists, empty it
self._input_boxes.clear()
except AttributeError:
# create a new dictionary
self._input_boxes = {}
_lookup_dict = {str: LineEdit,
int: LineEdit,
float: LineEdit,
datetime.datetime: DateTimeBox,
bool: CheckBox,
list: ComboBox}
# loop over the keys to create an input box for each key
for key in keys:
# declare a new horizontal layout
horz_layout = QtGui.QHBoxLayout()
# declare the label
lbl = QtGui.QLabel(key)
try:
# get the description from the nested dict
description = keys[key]["description"]
except KeyError:
# use the key as the description
description = key
try:
# get the key_type from the nested dict
key_type = keys[key]["type"]
except KeyError:
# default to string typed
key_type = str
input_box_type = _lookup_dict[key_type]
# declare the input box
input_box = input_box_type(label_text=key, hover_text=description,
has_check_box=_defaults["has_check_box"])
# add the input box to the input_boxes dict
self._input_boxes[key] = input_box
# add the widgets to the layout
horz_layout.addWidget(input_box)
# set a dummy widget
widg = QtGui.QWidget()
widg.setLayout(horz_layout)
# add the horizontal layout to the vertical layout
vert_layout.addWidget(widg)
query_input = QtGui.QWidget()
query_input.setLayout(vert_layout)
# return the vertical layout
return query_input
############################################################################
# Runtime behavior #
############################################################################
def register_unique_id_gen_func(self, unique_id_func):
"""
Parameters
----------
unique_id_func : function
Function that generates a unique ID for a results dictionary. For
now, this function should probably just pick out the header_id
"""
self._unique_id_func = unique_id_func
def enable_search_btn(self, is_enabled):
"""
Function to enable/disable the search button
Parameters
----------
is_enabled : bool
enables/disables the search button
"""
self._search_btn.setEnabled(is_enabled)
def enable_add_btn(self, is_enabled):
"""
Function to enable/disable the search button
Parameters
----------
is_enabled : bool
enables/disables the search button
"""
self._add_btn.setEnabled(is_enabled)
@QtCore.Slot()
def add(self):
"""
Figure out which result is clicked and emit the add_btn_sig with the
following arguments:
dict1 : dict
Dictionary of search keys used to generate the results shown in the
tree widget
dict2 : dict
unique id dictionary that is guaranteed to return dict3 when
unpacked into the registered search function
dict3 : dict
One results dictionary
list : list
path to the currently selected node in the tree widget
"""
# TODO Change this to debugger level logging
logger.debug("add_clicked")
path_to_node, result_idx = self._tree.find_root()
        logger.debug("search results type: %s", self._search_results.__class__)
res_keys = list(self._search_results)
res_keys.sort()
cur_result_dict = self._search_results[res_keys[result_idx]]
        logger.debug("current result keys: %s", list(cur_result_dict))
# todo ask the tree nicely for its currently selected dictionary
# unique_id = tree.get_current()
self.add_btn_sig.emit(self._search_dict,
self.create_unique_id(cur_result_dict),
cur_result_dict, path_to_node)
def create_unique_id(self, result_dict):
"""
Call the unique id function that was registered
Parameters
----------
result_dict : dict
Dictionary that will be used to generate a unique id dictionary.
Returns
-------
unique_id : dict
The unique id dictionary is guaranteed to produce "result_dict" when
unpacked into the search function
"""
return self._unique_id_func(result_dict)
@QtCore.Slot()
def search(self):
"""
Parse the search boxes and emit it as a signal
"""
self.read_search_boxes()
# once the dictionary is constructed, emit it as a signal
self.search_btn_sig.emit(self._search_dict)
@QtCore.Slot()
def read_search_boxes(self):
"""
Parse the search boxes to set up the query dictionary and store it as an
instance variable "_search_dict"
"""
# declare the search dict
# TODO Change this to debugger level logging @tacaswell
logger.debug("read_search_boxes")
self._search_dict = {}
        logger.debug("input boxes: %s", self._input_boxes)
try:
# loop over the list of input boxes to extract the search string
# todo need better list comprehension
self._search_dict = {key: self._input_boxes[key].getValue()
for key in self._input_boxes if
self._input_boxes[key].getValue() is not None}
except AttributeError as e:
tb = traceback.format_exc()
logger.error(tb)
# the only time this will be caught is in the initial setup and it
# is therefore ok to ignore this error
pass
@QtCore.Slot(list)
def update_search_results(self, results):
"""
Pass the search results to the recursive tree widget which displays them
Parameters
----------
results : array, list, object
"""
# stash the search results for later use
self._search_results = results
self._tree.fill_widget(results)
self.enable_add_btn(is_enabled=True)
# todo enable add button only when something is selected
# todo status bar to display feedback
# todo sequence diagrams for runtime behavior
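# An illustrative, hypothetical wiring sketch (not part of this module):
# connecting QueryMainWindow to toy callbacks. The keys, the callbacks, and
# the canned result data are all assumptions, and a QtGui.QApplication is
# assumed to be running before this is called.
def _example_query_window():
    keys = {'owner': {'description': 'who took the data', 'type': str}}
    def search(query_dict):
        # Pretend every search returns one canned result.
        return {'result_0': {'owner': query_dict.get('owner'), 'scan_id': 0}}
    def add(query_dict, unique_id_dict, result_dict):
        logger.info('added %s', unique_id_dict)
    def unique_id(result_dict):
        return {'scan_id': result_dict['scan_id']}
    window = QueryMainWindow(keys=keys, search_func=search, add_func=add,
                             unique_id_func=unique_id)
    window.show()
    return window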
| {
"repo_name": "ericdill/xray-vision",
"path": "xray_vision/qt_widgets/query_widget.py",
"copies": "6",
"size": "18742",
"license": "bsd-3-clause",
"hash": 3032589046550332000,
"line_mean": 33.0145190563,
"line_max": 80,
"alpha_frac": 0.5754455234,
"autogenerated": false,
"ratio": 4.6151194287121395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00015684165714413748,
"num_lines": 551
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import sys
import importlib
from collections import OrderedDict
from . import try_numba
from . import preprocessing
def performance_report():
"""Display summary of which optional speedups are installed/enabled"""
print("Yes, but could it be faster?")
if try_numba.NUMBA_AVAILABLE:
print("FAST: numba is available and enabled "
"(fast subnets and feature-finding).")
else:
print("SLOW: numba was not found")
if preprocessing.USING_FFTW:
print("FAST: Using pyfftw for image preprocessing.")
else:
print("SLOW: pyfftw not found (slower image preprocessing).")
def dependencies():
"""
Give the version of each of the dependencies -- useful for bug reports.
Returns
-------
result : dict
mapping the name of each package to its version string or, if an
optional dependency is not installed, None
"""
    # Some distributions are imported under a different name than they are
    # installed under (e.g. scikit-image is imported as skimage).
    packages = [('six', 'six'), ('numpy', 'numpy'), ('scipy', 'scipy'),
                ('matplotlib', 'matplotlib'), ('pandas', 'pandas'),
                ('scikit-image', 'skimage'), ('pyyaml', 'yaml'),
                ('pytables', 'tables'), ('numba', 'numba'),
                ('pyfftw', 'pyfftw')]
    result = OrderedDict()
    for package_name, import_name in packages:
        try:
            package = importlib.import_module(import_name)
except ImportError:
result[package_name] = None
else:
try:
version = package.__version__
except AttributeError:
version = package.version # pyfftw does not have __version__
result[package_name] = version
# Build Python version string
version_info = sys.version_info
version_string = '.'.join(map(str, [version_info[0], version_info[1],
version_info[2]]))
result['python'] = version_string
return result
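# An illustrative, hypothetical usage sketch (not part of this module):
# gathering the diagnostic output above for a bug report.
def _example_bug_report():
    performance_report()
    for name, version in dependencies().items():
        print('{0}: {1}'.format(name, version or 'not installed'))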
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/diag.py",
"copies": "1",
"size": "1858",
"license": "bsd-3-clause",
"hash": -2284602226967704600,
"line_mean": 32.1785714286,
"line_max": 77,
"alpha_frac": 0.6119483315,
"autogenerated": false,
"ratio": 4.4556354916067145,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 56
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import unittest
import nose
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose
from pandas import DataFrame, Series
import trackpy as tp
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
class TestCorrelations(unittest.TestCase):
def setUp(self):
np.random.seed(0)
randn = np.random.randn
N = 500
a = DataFrame(randn(N, 2), columns=['x', 'y'])
b = DataFrame(a[['x', 'y']] + 0.1*randn(N, 2), columns=['x', 'y'])
a['particle'] = np.arange(N)
b['particle'] = np.arange(N)
a['frame'] = 0
b['frame'] = 1
self.random_walk = pd.concat([a, b])
def test_no_correlations(self):
v = tp.velocity_corr(self.random_walk, 0, 1)
binned = v.groupby(np.digitize(v.r, np.linspace(0, 1, 10))).mean()
actual = binned['dot_product']
expected = np.zeros_like(actual)
assert_allclose(actual, expected, atol=1e-3)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/tests/test_correlations.py",
"copies": "2",
"size": "1278",
"license": "bsd-3-clause",
"hash": 1775464732556269300,
"line_mean": 30.1707317073,
"line_max": 75,
"alpha_frac": 0.5938967136,
"autogenerated": false,
"ratio": 3.372031662269129,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9958575589162968,
"avg_score": 0.001470557341232327,
"num_lines": 41
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import unittest
import nose
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from numpy.testing import assert_almost_equal, assert_allclose
from numpy.testing.decorators import slow
from pandas.util.testing import (assert_series_equal, assert_frame_equal,
assert_almost_equal)
import trackpy as tp
from trackpy.utils import suppress_plotting
# Catch attempts to set values on an inadvertent copy of a Pandas object.
tp.utils.make_pandas_strict()
def random_walk(N):
return np.cumsum(np.random.randn(N))
def conformity(df):
"Organize toy data to look like real data."
return df.set_index('frame', drop=False).sort(['frame', 'particle']). \
astype('float64')
class TestDrift(unittest.TestCase):
def setUp(self):
N = 10
Y = 1
a = DataFrame({'x': np.zeros(N), 'y': np.zeros(N),
'frame': np.arange(N), 'particle': np.zeros(N)})
b = DataFrame({'x': np.zeros(N - 1), 'y': Y + np.zeros(N - 1),
'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
self.dead_still = conformity(pd.concat([a, b]))
P = 1000 # particles
A = 0.00001 # step amplitude
np.random.seed(0)
particles = [DataFrame({'x': A*random_walk(N),
'y': A*random_walk(N),
'frame': np.arange(N), 'particle': i}) for i in range(P)]
self.many_walks = conformity(pd.concat(particles))
a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
'frame': np.arange(N), 'particle': np.zeros(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
self.steppers = conformity(pd.concat([a, b]))
def test_no_drift(self):
N = 10
expected = DataFrame({'x': np.zeros(N), 'y': np.zeros(N)}).iloc[1:]
expected = expected.astype('float')
expected.index.name = 'frame'
expected.columns = ['x', 'y']
# ^ no drift measured for Frame 0
actual = tp.compute_drift(self.dead_still)
assert_frame_equal(actual, expected)
# Small random drift
actual = tp.compute_drift(self.many_walks)
assert_frame_equal(actual, expected)
def test_constant_drift(self):
N = 10
expected = DataFrame({'x': np.arange(N), 'y': np.zeros(N)}).iloc[1:]
expected = expected.astype('float')
expected.index.name = 'frame'
expected.columns = ['x', 'y']
actual = tp.compute_drift(self.steppers)
assert_frame_equal(actual, expected)
def test_subtract_zero_drift(self):
N = 10
drift = DataFrame(np.zeros((N - 1, 2)),
index=np.arange(1, N)).astype('float64')
drift.columns = ['x', 'y']
drift.index.name = 'frame'
actual = tp.subtract_drift(self.dead_still, drift)
assert_frame_equal(actual, self.dead_still)
actual = tp.subtract_drift(self.many_walks, drift)
assert_frame_equal(actual, self.many_walks)
actual = tp.subtract_drift(self.steppers, drift)
assert_frame_equal(actual, self.steppers)
def test_subtract_constant_drift(self):
N = 10
# Add a constant drift here, and then use subtract_drift to
# subtract it.
drift = DataFrame(np.outer(np.arange(N - 1), [1, 1]),
index=np.arange(1, N))
drift.columns = ['x', 'y']
drift.index.name = 'frame'
actual = tp.subtract_drift(
self.dead_still.add(drift, fill_value=0), drift)
assert_frame_equal(actual, self.dead_still)
actual = tp.subtract_drift(
self.many_walks.add(drift, fill_value=0), drift)
assert_frame_equal(actual, self.many_walks)
actual = tp.subtract_drift(
self.steppers.add(drift, fill_value=0), drift)
assert_frame_equal(actual, self.steppers)
class TestMSD(unittest.TestCase):
def setUp(self):
N = 10
Y = 1
a = DataFrame({'x': np.zeros(N), 'y': np.zeros(N),
'frame': np.arange(N), 'particle': np.zeros(N)})
b = DataFrame({'x': np.zeros(N - 1), 'y': Y + np.zeros(N - 1),
'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
self.dead_still = conformity(pd.concat([a, b]))
P = 50 # particles
A = 1 # step amplitude
np.random.seed(0)
particles = [DataFrame({'x': A*random_walk(N),
'y': A*random_walk(N),
'frame': np.arange(N), 'particle': i}) for i in range(P)]
self.many_walks = conformity(pd.concat(particles))
a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
'frame': np.arange(N), 'particle': np.zeros(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
self.steppers = conformity(pd.concat([a, b]))
def test_zero_emsd(self):
N = 10
actual = tp.emsd(self.dead_still, 1, 1)
expected = Series(np.zeros(N)).iloc[1:].astype('float64')
assert_series_equal(actual, expected)
def test_linear_emsd(self):
A = 1
EARLY = 7 # only early lag times have good stats
actual = tp.emsd(self.many_walks, 1, 1, max_lagtime=EARLY)
a = np.arange(EARLY, dtype='float64')
expected = Series(2*A*a, index=a).iloc[1:]
expected.name = 'msd'
expected.index.name = 'lag time [s]'
# HACK: Float64Index imprecision ruins index equality.
# Test them separately. If that works, make them exactly the same.
assert_almost_equal(actual.index.values, expected.index.values)
actual.index = expected.index
assert_series_equal(np.round(actual), expected)
class TestSpecial(unittest.TestCase):
def setUp(self):
N = 10
Y = 1
a = DataFrame({'x': np.arange(N), 'y': np.zeros(N),
'frame': np.arange(N), 'particle': np.zeros(N)})
b = DataFrame({'x': np.arange(1, N), 'y': Y + np.zeros(N - 1),
'frame': np.arange(1, N), 'particle': np.ones(N - 1)})
self.steppers = conformity(pd.concat([a, b]))
def test_theta_entropy(self):
# just a smoke test
theta_entropy = lambda x: tp.motion.theta_entropy(x, plot=False)
self.steppers.groupby('particle').apply(theta_entropy)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/tests/test_motion.py",
"copies": "1",
"size": "6826",
"license": "bsd-3-clause",
"hash": 5962750244945369000,
"line_mean": 38.4566473988,
"line_max": 77,
"alpha_frac": 0.5627014357,
"autogenerated": false,
"ratio": 3.3658777120315584,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44285791477315584,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.spatial import cKDTree
from pandas import DataFrame
from .preprocessing import bandpass, scale_to_gamut, scalefactor_to_gamut
from .utils import record_meta, print_update, validate_tuple
from .masks import (binary_mask, N_binary_mask, r_squared_mask,
x_squared_masks, cosmask, sinmask)
from .uncertainty import _static_error, measure_noise
import trackpy # to get trackpy.__version__
from .try_numba import NUMBA_AVAILABLE
from .feature_numba import (_numba_refine_2D, _numba_refine_2D_c,
_numba_refine_2D_c_a, _numba_refine_3D)
def percentile_threshold(image, percentile):
"""Find grayscale threshold based on distribution in image."""
not_black = image[np.nonzero(image)]
if len(not_black) == 0:
return np.nan
return np.percentile(not_black, percentile)
def local_maxima(image, radius, percentile=64, margin=None):
"""Find local maxima whose brightness is above a given percentile.
Parameters
----------
    radius : integer or tuple, defining "local" in "local maxima"
percentile : chooses minimum grayscale value for a local maximum
margin : zone of exclusion at edges of image. Defaults to radius.
A smarter value is set by locate().
"""
if margin is None:
margin = radius
ndim = image.ndim
# Compute a threshold based on percentile.
threshold = percentile_threshold(image, percentile)
if np.isnan(threshold):
warnings.warn("Image is completely black.", UserWarning)
return np.empty((0, ndim))
# The intersection of the image with its dilation gives local maxima.
if not np.issubdtype(image.dtype, np.integer):
raise TypeError("Perform dilation on exact (i.e., integer) data.")
footprint = binary_mask(radius, ndim)
dilation = ndimage.grey_dilation(image, footprint=footprint,
mode='constant')
maxima = np.vstack(np.where((image == dilation) & (image > threshold))).T
if not np.size(maxima) > 0:
warnings.warn("Image contains no local maxima.", UserWarning)
return np.empty((0, ndim))
# Do not accept peaks near the edges.
shape = np.array(image.shape)
near_edge = np.any((maxima < margin) | (maxima > (shape - margin - 1)), 1)
maxima = maxima[~near_edge]
if not np.size(maxima) > 0:
warnings.warn("All local maxima were in the margins.", UserWarning)
# Return coords in as a numpy array shaped so it can be passed directly
# to the DataFrame constructor.
return maxima
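# An illustrative, hypothetical usage sketch (not part of this module):
# local_maxima() on a tiny synthetic image containing one bright pixel. The
# image size, radius, and intensities are arbitrary assumptions.
def _example_local_maxima():
    img = np.zeros((50, 50), dtype=np.uint8)
    img[20, 30] = 200   # a bright feature
    img[20, 29] = 100   # a dimmer shoulder; not a local maximum
    return local_maxima(img, radius=(3, 3))  # expected: array([[20, 30]])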
def estimate_mass(image, radius, coord):
"Compute the total brightness in the neighborhood of a local maximum."
square = [slice(c - rad, c + rad + 1) for c, rad in zip(coord, radius)]
neighborhood = binary_mask(radius, image.ndim)*image[square]
return np.sum(neighborhood)
def estimate_size(image, radius, coord, estimated_mass):
"Compute the total brightness in the neighborhood of a local maximum."
square = [slice(c - rad, c + rad + 1) for c, rad in zip(coord, radius)]
neighborhood = binary_mask(radius, image.ndim)*image[square]
Rg = np.sqrt(np.sum(r_squared_mask(radius, image.ndim) * neighborhood) /
estimated_mass)
return Rg
def _safe_center_of_mass(x, radius, grids):
normalizer = x.sum()
if normalizer == 0: # avoid divide-by-zero errors
return np.array(radius)
return np.array([(x * grids[dim]).sum() / normalizer
for dim in range(x.ndim)])
def refine(raw_image, image, radius, coords, separation=0, max_iterations=10,
engine='auto', characterize=True, walkthrough=False):
"""Find the center of mass of a bright feature starting from an estimate.
Characterize the neighborhood of a local maximum, and iteratively
hone in on its center-of-brightness. Return its coordinates, integrated
brightness, size (Rg), eccentricity (0=circular), and signal strength.
Parameters
----------
raw_image : array (any dimensions)
used for final characterization
image : array (any dimension)
processed image, used for locating center of mass
    coords : array
        estimated positions, one row per feature
    separation : float or tuple
        if positive, features closer than this are merged, keeping the
        brighter one
max_iterations : integer
max number of loops to refine the center of mass, default 10
characterize : boolean, True by default
Compute and return mass, size, eccentricity, signal.
walkthrough : boolean, False by default
Print the offset on each loop and display final neighborhood image.
engine : {'python', 'numba'}
Numba is faster if available, but it cannot do walkthrough.
"""
# ensure that radius is tuple of integers, for direct calls to refine()
radius = validate_tuple(radius, image.ndim)
# Main loop will be performed in separate function.
if engine == 'auto':
if NUMBA_AVAILABLE and image.ndim in [2, 3]:
engine = 'numba'
else:
engine = 'python'
if engine == 'python':
coords = np.array(coords) # a copy, will not modify in place
results = _refine(raw_image, image, radius, coords, max_iterations,
characterize, walkthrough)
elif engine == 'numba':
if not NUMBA_AVAILABLE:
warnings.warn("numba could not be imported. Without it, the "
"'numba' engine runs very slow. Use the 'python' "
"engine or install numba.", UserWarning)
if image.ndim not in [2, 3]:
raise NotImplementedError("The numba engine only supports 2D or 3D "
"images. You can extend it if you feel "
"like a hero.")
if walkthrough:
raise ValueError("walkthrough is not availabe in the numba engine")
# Do some extra prep in pure Python that can't be done in numba.
coords = np.array(coords, dtype=np.float64)
N = coords.shape[0]
mask = binary_mask(radius, image.ndim)
if image.ndim == 3:
if characterize:
if np.all(radius[1:] == radius[:-1]):
results_columns = 8
else:
results_columns = 10
else:
results_columns = 4
r2_mask = r_squared_mask(radius, image.ndim)[mask]
x2_masks = x_squared_masks(radius, image.ndim)
z2_mask = image.ndim * x2_masks[0][mask]
y2_mask = image.ndim * x2_masks[1][mask]
x2_mask = image.ndim * x2_masks[2][mask]
results = np.empty((N, results_columns), dtype=np.float64)
maskZ, maskY, maskX = np.asarray(np.asarray(mask.nonzero()),
dtype=np.int16)
_numba_refine_3D(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], radius[2], coords, N,
int(max_iterations), characterize,
image.shape[0], image.shape[1], image.shape[2],
maskZ, maskY, maskX, maskX.shape[0],
r2_mask, z2_mask, y2_mask, x2_mask, results)
elif not characterize:
mask_coordsY, mask_coordsX = np.asarray(mask.nonzero(), dtype=np.int16)
results = np.empty((N, 3), dtype=np.float64)
_numba_refine_2D(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], coords, N,
int(max_iterations),
image.shape[0], image.shape[1],
mask_coordsY, mask_coordsX, mask_coordsY.shape[0],
results)
elif radius[0] == radius[1]:
mask_coordsY, mask_coordsX = np.asarray(mask.nonzero(), dtype=np.int16)
results = np.empty((N, 7), dtype=np.float64)
r2_mask = r_squared_mask(radius, image.ndim)[mask]
cmask = cosmask(radius)[mask]
smask = sinmask(radius)[mask]
_numba_refine_2D_c(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], coords, N,
int(max_iterations),
image.shape[0], image.shape[1],
mask_coordsY, mask_coordsX, mask_coordsY.shape[0],
r2_mask, cmask, smask, results)
else:
mask_coordsY, mask_coordsX = np.asarray(mask.nonzero(), dtype=np.int16)
results = np.empty((N, 8), dtype=np.float64)
x2_masks = x_squared_masks(radius, image.ndim)
y2_mask = image.ndim * x2_masks[0][mask]
x2_mask = image.ndim * x2_masks[1][mask]
cmask = cosmask(radius)[mask]
smask = sinmask(radius)[mask]
_numba_refine_2D_c_a(np.asarray(raw_image), np.asarray(image),
radius[0], radius[1], coords, N,
int(max_iterations),
image.shape[0], image.shape[1],
mask_coordsY, mask_coordsX, mask_coordsY.shape[0],
y2_mask, x2_mask, cmask, smask, results)
else:
raise ValueError("Available engines are 'python' and 'numba'")
# Flat peaks return multiple nearby maxima. Eliminate duplicates.
if np.all(np.greater(separation, 0)):
mass_index = image.ndim # i.e., index of the 'mass' column
while True:
# Rescale positions, so that pairs are identified below a distance
# of 1. Do so every iteration (room for improvement?)
positions = results[:, :mass_index]/list(reversed(separation))
mass = results[:, mass_index]
duplicates = cKDTree(positions, 30).query_pairs(1)
if len(duplicates) == 0:
break
to_drop = []
for pair in duplicates:
# Drop the dimmer one.
if np.equal(*mass.take(pair, 0)):
# Rare corner case: a tie!
# Break ties by sorting by sum of coordinates, to avoid
# any randomness resulting from cKDTree returning a set.
dimmer = np.argsort(np.sum(positions.take(pair, 0), 1))[0]
else:
dimmer = np.argmin(mass.take(pair, 0))
to_drop.append(pair[dimmer])
results = np.delete(results, to_drop, 0)
return results
# (This is pure Python. Numba-accelerated variants live in feature_numba.)
def _refine(raw_image, image, radius, coords, max_iterations,
characterize, walkthrough):
SHIFT_THRESH = 0.6
GOOD_ENOUGH_THRESH = 0.005
ndim = image.ndim
isotropic = np.all(radius[1:] == radius[:-1])
mask = binary_mask(radius, ndim)
slices = [[slice(c - rad, c + rad + 1) for c, rad in zip(coord, radius)]
for coord in coords]
# Declare arrays that we will fill iteratively through loop.
N = coords.shape[0]
final_coords = np.empty_like(coords, dtype=np.float64)
mass = np.empty(N, dtype=np.float64)
raw_mass = np.empty(N, dtype=np.float64)
if characterize:
if isotropic:
Rg = np.empty(N, dtype=np.float64)
else:
Rg = np.empty((N, len(radius)), dtype=np.float64)
ecc = np.empty(N, dtype=np.float64)
signal = np.empty(N, dtype=np.float64)
ogrid = np.ogrid[[slice(0, i) for i in mask.shape]] # for center of mass
ogrid = [g.astype(float) for g in ogrid]
for feat in range(N):
coord = coords[feat]
# Define the circular neighborhood of (x, y).
rect = slices[feat]
neighborhood = mask*image[rect]
cm_n = _safe_center_of_mass(neighborhood, radius, ogrid)
cm_i = cm_n - radius + coord # image coords
allow_moves = True
for iteration in range(max_iterations):
off_center = cm_n - radius
if walkthrough:
print_update(off_center)
if np.all(np.abs(off_center) < GOOD_ENOUGH_THRESH):
break # Accurate enough.
# If we're off by more than half a pixel in any direction, move.
elif np.any(np.abs(off_center) > SHIFT_THRESH) & allow_moves:
# In here, coord is an integer.
new_coord = coord
new_coord[off_center > SHIFT_THRESH] += 1
new_coord[off_center < -SHIFT_THRESH] -= 1
# Don't move outside the image!
upper_bound = np.array(image.shape) - 1 - radius
new_coord = np.clip(new_coord, radius, upper_bound).astype(int)
# Update slice to shifted position.
rect = [slice(c - rad, c + rad + 1)
for c, rad in zip(new_coord, radius)]
neighborhood = mask*image[rect]
# If we're off by less than half a pixel, interpolate.
else:
# Here, coord is a float. We are off the grid.
neighborhood = ndimage.shift(neighborhood, -off_center,
order=2, mode='constant', cval=0)
new_coord = coord + off_center
# Disallow any whole-pixels moves on future iterations.
allow_moves = False
cm_n = _safe_center_of_mass(neighborhood, radius, ogrid) # neighborhood
cm_i = cm_n - radius + new_coord # image coords
coord = new_coord
# matplotlib and ndimage have opposite conventions for xy <-> yx.
final_coords[feat] = cm_i[..., ::-1]
if walkthrough:
import matplotlib.pyplot as plt
plt.imshow(neighborhood)
# Characterize the neighborhood of our final centroid.
mass[feat] = neighborhood.sum()
if not characterize:
continue # short-circuit loop
if isotropic:
Rg[feat] = np.sqrt(np.sum(r_squared_mask(radius, ndim) *
neighborhood) / mass[feat])
else:
Rg[feat] = np.sqrt(ndim * np.sum(x_squared_masks(radius, ndim) *
neighborhood,
axis=tuple(range(1, ndim + 1))) /
mass[feat])[::-1] # change order yx -> xy
# I only know how to measure eccentricity in 2D.
if ndim == 2:
ecc[feat] = np.sqrt(np.sum(neighborhood*cosmask(radius))**2 +
np.sum(neighborhood*sinmask(radius))**2)
ecc[feat] /= (mass[feat] - neighborhood[radius] + 1e-6)
else:
ecc[feat] = np.nan
signal[feat] = neighborhood.max() # based on bandpassed image
raw_neighborhood = mask*raw_image[rect]
raw_mass[feat] = raw_neighborhood.sum() # based on raw image
if not characterize:
return np.column_stack([final_coords, mass])
else:
return np.column_stack([final_coords, mass, Rg, ecc, signal, raw_mass])
def locate(raw_image, diameter, minmass=100., maxsize=None, separation=None,
noise_size=1, smoothing_size=None, threshold=None, invert=False,
percentile=64, topn=None, preprocess=True, max_iterations=10,
filter_before=True, filter_after=True,
characterize=True, engine='auto'):
"""Locate Gaussian-like blobs of some approximate size in an image.
Preprocess the image by performing a band pass and a threshold.
Locate all peaks of brightness, characterize the neighborhoods of the peaks
    and take only those with given total brightness ("mass"). Finally,
refine the positions of each peak.
Parameters
----------
image : image array (any dimensions)
diameter : feature size in px
This may be a single number or a tuple giving the feature's
extent in each dimension, useful when the dimensions do not have
equal resolution (e.g. confocal microscopy). The tuple order is the
same as the image shape, conventionally (z, y, x) or (y, x). The
number(s) must be odd integers. When in doubt, round up.
minmass : minimum integrated brightness
Default is 100, but a good value is often much higher. This is a
        crucial parameter for eliminating spurious features.
maxsize : maximum radius-of-gyration of brightness, default None
separation : feature separation, in pixels
Default is diameter + 1. May be a tuple, see diameter for details.
noise_size : width of Gaussian blurring kernel, in pixels
Default is 1. May be a tuple, see diameter for details.
smoothing_size : size of boxcar smoothing, in pixels
Default is diameter. May be a tuple, see diameter for details.
threshold : Clip bandpass result below this value.
Default None, passed through to bandpass.
invert : Set to True if features are darker than background. False by
default.
percentile : Features must have a peak brighter than pixels in this
percentile. This helps eliminate spurious peaks.
topn : Return only the N brightest features above minmass.
If None (default), return all features above minmass.
Returns
-------
DataFrame([x, y, mass, size, ecc, signal])
where mass means total integrated brightness of the blob,
size means the radius of gyration of its Gaussian-like profile,
and ecc is its eccentricity (0 is circular).
Other Parameters
----------------
    preprocess : Set to False to turn off bandpass preprocessing.
max_iterations : integer
max number of loops to refine the center of mass, default 10
filter_before : boolean
Use minmass (and maxsize, if set) to eliminate spurious features
based on their estimated mass and size before refining position.
True by default for performance.
filter_after : boolean
Use final characterizations of mass and size to eliminate spurious
features. True by default.
characterize : boolean
Compute "extras": eccentricity, signal, ep. True by default.
engine : {'auto', 'python', 'numba'}
See Also
--------
batch : performs location on many images in batch
Notes
-----
Locate works with a coordinate system that has its origin at the center of
pixel (0, 0). In almost all cases this will be the topleft pixel: the
y-axis is pointing downwards.
This is an implementation of the Crocker-Grier centroid-finding algorithm.
[1]_
References
----------
.. [1] Crocker, J.C., Grier, D.G. http://dx.doi.org/10.1006/jcis.1996.0217
"""
# Validate parameters and set defaults.
raw_image = np.squeeze(raw_image)
shape = raw_image.shape
ndim = len(shape)
diameter = validate_tuple(diameter, ndim)
diameter = tuple([int(x) for x in diameter])
if not np.all([x & 1 for x in diameter]):
raise ValueError("Feature diameter must be an odd integer. Round up.")
radius = tuple([x//2 for x in diameter])
isotropic = np.all(radius[1:] == radius[:-1])
if (not isotropic) and (maxsize is not None):
raise ValueError("Filtering by size is not available for anisotropic "
"features.")
if separation is None:
separation = tuple([x + 1 for x in diameter])
else:
separation = validate_tuple(separation, ndim)
if smoothing_size is None:
smoothing_size = diameter
else:
smoothing_size = validate_tuple(smoothing_size, ndim)
noise_size = validate_tuple(noise_size, ndim)
# Check whether the image looks suspiciously like a color image.
if 3 in shape or 4 in shape:
dim = raw_image.ndim
warnings.warn("I am interpreting the image as {0}-dimensional. "
"If it is actually a {1}-dimensional color image, "
"convert it to grayscale first.".format(dim, dim-1))
if preprocess:
if invert:
# It is tempting to do this in place, but if it is called multiple
# times on the same image, chaos reigns.
if np.issubdtype(raw_image.dtype, np.integer):
max_value = np.iinfo(raw_image.dtype).max
raw_image = raw_image ^ max_value
else:
# To avoid degrading performance, assume gamut is zero to one.
# Have you ever encountered an image of unnormalized floats?
raw_image = 1 - raw_image
image = bandpass(raw_image, noise_size, smoothing_size, threshold)
else:
image = raw_image.copy()
# Coerce the image into integer type. Rescale to fill dynamic range.
if np.issubdtype(raw_image.dtype, np.integer):
dtype = raw_image.dtype
else:
dtype = np.uint8
scale_factor = scalefactor_to_gamut(image, dtype)
image = scale_to_gamut(image, dtype, scale_factor)
# Set up a DataFrame for the final results.
if image.ndim < 4:
coord_columns = ['x', 'y', 'z'][:image.ndim]
else:
coord_columns = map(lambda i: 'x' + str(i), range(image.ndim))
MASS_COLUMN_INDEX = len(coord_columns)
columns = coord_columns + ['mass']
if characterize:
if isotropic:
SIZE_COLUMN_INDEX = len(columns)
columns += ['size']
else:
SIZE_COLUMN_INDEX = range(len(columns),
len(columns) + len(coord_columns))
columns += ['size_' + cc for cc in coord_columns]
SIGNAL_COLUMN_INDEX = len(columns) + 1
columns += ['ecc', 'signal', 'raw_mass']
if isotropic and np.all(noise_size[1:] == noise_size[:-1]):
columns += ['ep']
else:
columns += ['ep_' + cc for cc in coord_columns]
# Find local maxima.
# Define zone of exclusion at edges of image, avoiding
# - Features with incomplete image data ("radius")
# - Extended particles that cannot be explored during subpixel
# refinement ("separation")
# - Invalid output of the bandpass step ("smoothing_size")
margin = tuple([max(rad, sep // 2 - 1, sm // 2) for (rad, sep, sm) in
zip(radius, separation, smoothing_size)])
coords = local_maxima(image, radius, percentile, margin)
count_maxima = coords.shape[0]
if count_maxima == 0:
return DataFrame(columns=columns)
# Proactively filter based on estimated mass/size before
# refining positions.
if filter_before:
approx_mass = np.empty(count_maxima) # initialize to avoid appending
for i in range(count_maxima):
approx_mass[i] = estimate_mass(image, radius, coords[i])
condition = approx_mass > minmass * scale_factor
if maxsize is not None:
approx_size = np.empty(count_maxima)
for i in range(count_maxima):
approx_size[i] = estimate_size(image, radius, coords[i],
approx_mass[i])
condition &= approx_size < maxsize
coords = coords[condition]
count_qualified = coords.shape[0]
if count_qualified == 0:
warnings.warn("No maxima survived mass- and size-based prefiltering.")
return DataFrame(columns=columns)
# Refine their locations and characterize mass, size, etc.
refined_coords = refine(raw_image, image, radius, coords, separation,
max_iterations, engine, characterize)
    # mass and signal values have to be corrected due to the rescaling
# raw_mass was obtained from raw image; size and ecc are scale-independent
refined_coords[:, MASS_COLUMN_INDEX] *= 1. / scale_factor
if characterize:
refined_coords[:, SIGNAL_COLUMN_INDEX] *= 1. / scale_factor
# Filter again, using final ("exact") mass -- and size, if set.
exact_mass = refined_coords[:, MASS_COLUMN_INDEX]
if filter_after:
condition = exact_mass > minmass
if maxsize is not None:
exact_size = refined_coords[:, SIZE_COLUMN_INDEX]
condition &= exact_size < maxsize
refined_coords = refined_coords[condition]
exact_mass = exact_mass[condition] # used below by topn
count_qualified = refined_coords.shape[0]
if count_qualified == 0:
warnings.warn("No maxima survived mass- and size-based filtering.")
return DataFrame(columns=columns)
if topn is not None and count_qualified > topn:
if topn == 1:
# special case for high performance and correct shape
refined_coords = refined_coords[np.argmax(exact_mass)]
refined_coords = refined_coords.reshape(1, -1)
else:
refined_coords = refined_coords[np.argsort(exact_mass)][-topn:]
# Estimate the uncertainty in position using signal (measured in refine)
# and noise (measured here below).
if characterize:
if preprocess: # reuse processed image to increase performance
black_level, noise = measure_noise(raw_image, diameter,
threshold, image)
else:
black_level, noise = measure_noise(raw_image, diameter, threshold)
Npx = N_binary_mask(radius, ndim)
mass = refined_coords[:, SIGNAL_COLUMN_INDEX + 1] - Npx * black_level
ep = _static_error(mass, noise, radius[::-1], noise_size[::-1])
refined_coords = np.column_stack([refined_coords, ep])
f = DataFrame(refined_coords, columns=columns)
# If this is a pims Frame object, it has a frame number.
# Tag it on; this is helpful for parallelization.
if hasattr(raw_image, 'frame_no') and raw_image.frame_no is not None:
f['frame'] = raw_image.frame_no
return f
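# Minimal usage sketch for ``locate`` (not a definitive recipe): the file
# name and the diameter/minmass values below are assumptions for
# illustration, and ``pims`` is assumed to be installed for reading images.
def _locate_usage_example():
    import pims
    frames = pims.open('frames.tif')   # hypothetical image stack
    # diameter must be an odd integer slightly larger than the feature;
    # raise minmass until dim, spurious detections disappear.
    f = locate(frames[0], diameter=11, minmass=200)
    return f[['x', 'y', 'mass', 'size']]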
def batch(frames, diameter, minmass=100, maxsize=None, separation=None,
noise_size=1, smoothing_size=None, threshold=None, invert=False,
percentile=64, topn=None, preprocess=True, max_iterations=10,
filter_before=True, filter_after=True,
characterize=True, engine='auto',
output=None, meta=True):
"""Locate Gaussian-like blobs of some approximate size in a set of images.
Preprocess the image by performing a band pass and a threshold.
Locate all peaks of brightness, characterize the neighborhoods of the peaks
    and take only those with given total brightness ("mass"). Finally,
refine the positions of each peak.
Parameters
----------
frames : list (or iterable) of images
diameter : feature size in px
This may be a single number or a tuple giving the feature's
extent in each dimension, useful when the dimensions do not have
equal resolution (e.g. confocal microscopy). The tuple order is the
same as the image shape, conventionally (z, y, x) or (y, x). The
number(s) must be odd integers. When in doubt, round up.
minmass : minimum integrated brightness
Default is 100, but a good value is often much higher. This is a
        crucial parameter for eliminating spurious features.
maxsize : maximum radius-of-gyration of brightness, default None
separation : feature separation, in pixels
Default is diameter + 1. May be a tuple, see diameter for details.
noise_size : width of Gaussian blurring kernel, in pixels
Default is 1. May be a tuple, see diameter for details.
smoothing_size : size of boxcar smoothing, in pixels
Default is diameter. May be a tuple, see diameter for details.
threshold : Clip bandpass result below this value.
Default None, passed through to bandpass.
invert : Set to True if features are darker than background. False by
default.
percentile : Features must have a peak brighter than pixels in this
percentile. This helps eliminate spurious peaks.
topn : Return only the N brightest features above minmass.
If None (default), return all features above minmass.
Returns
-------
DataFrame([x, y, mass, size, ecc, signal])
where mass means total integrated brightness of the blob,
size means the radius of gyration of its Gaussian-like profile,
and ecc is its eccentricity (0 is circular).
Other Parameters
----------------
preprocess : Set to False to turn off bandpass preprocessing.
max_iterations : integer
max number of loops to refine the center of mass, default 10
filter_before : boolean
Use minmass (and maxsize, if set) to eliminate spurious features
based on their estimated mass and size before refining position.
True by default for performance.
filter_after : boolean
        Use final characterizations of mass and size to eliminate spurious
features. True by default.
characterize : boolean
Compute "extras": eccentricity, signal, ep. True by default.
engine : {'auto', 'python', 'numba'}
output : {None, trackpy.PandasHDFStore, SomeCustomClass}
If None, return all results as one big DataFrame. Otherwise, pass
results from each frame, one at a time, to the write() method
of whatever class is specified here.
meta : By default, a YAML (plain text) log file is saved in the current
        directory. You can specify a different filepath as a string, or set
        it to False to disable logging.
See Also
--------
locate : performs location on a single image
Notes
-----
This is an implementation of the Crocker-Grier centroid-finding algorithm.
[1]_
Locate works with a coordinate system that has its origin at the center of
    pixel (0, 0). In almost all cases this will be the top-left pixel: the
y-axis is pointing downwards.
References
----------
.. [1] Crocker, J.C., Grier, D.G. http://dx.doi.org/10.1006/jcis.1996.0217
"""
# Gather meta information and save as YAML in current directory.
timestamp = pd.datetime.utcnow().strftime('%Y-%m-%d-%H%M%S')
    try:
        source = frames.filename
    except AttributeError:
        source = None
meta_info = dict(timestamp=timestamp,
trackpy_version=trackpy.__version__,
source=source, diameter=diameter, minmass=minmass,
maxsize=maxsize, separation=separation,
noise_size=noise_size, smoothing_size=smoothing_size,
invert=invert, percentile=percentile, topn=topn,
preprocess=preprocess, max_iterations=max_iterations,
filter_before=filter_before, filter_after=filter_after)
if meta:
if isinstance(meta, str):
filename = meta
else:
filename = 'feature_log_%s.yml' % timestamp
record_meta(meta_info, filename)
all_features = []
for i, image in enumerate(frames):
features = locate(image, diameter, minmass, maxsize, separation,
noise_size, smoothing_size, threshold, invert,
percentile, topn, preprocess, max_iterations,
filter_before, filter_after, characterize,
engine)
if hasattr(image, 'frame_no') and image.frame_no is not None:
frame_no = image.frame_no
# If this works, locate created a 'frame' column.
else:
frame_no = i
features['frame'] = i # just counting iterations
message = "Frame %d: %d features" % (frame_no, len(features))
print_update(message)
if len(features) == 0:
continue
if output is None:
all_features.append(features)
else:
output.put(features)
if output is None:
if len(all_features) > 0:
return pd.concat(all_features).reset_index(drop=True)
else: # return empty DataFrame
warnings.warn("No maxima found in any frame.")
return pd.DataFrame(columns=list(features.columns) + ['frame'])
else:
return output
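# Minimal usage sketch for ``batch`` (illustrative only): the file name and
# parameter values are assumptions; ``meta=False`` skips the YAML log file.
def _batch_usage_example():
    import pims
    frames = pims.open('frames.tif')   # hypothetical image sequence
    features = batch(frames[:10], diameter=11, minmass=200, meta=False)
    # one row per feature; 'frame' identifies the image each feature came from
    return features.groupby('frame').size()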
| {
"repo_name": "daniorerio/trackpy",
"path": "trackpy/feature.py",
"copies": "1",
"size": "32400",
"license": "bsd-3-clause",
"hash": -4641691812612571000,
"line_mean": 43.2019099591,
"line_max": 84,
"alpha_frac": 0.6044135802,
"autogenerated": false,
"ratio": 3.983769826632239,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5088183406832238,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from matplotlib import docstring
from matplotlib.offsetbox import (AnchoredOffsetbox, AuxTransformBox,
DrawingArea, TextArea, VPacker)
from matplotlib.patches import Rectangle, Ellipse
__all__ = ['AnchoredDrawingArea', 'AnchoredAuxTransformBox',
'AnchoredEllipse', 'AnchoredSizeBar']
class AnchoredDrawingArea(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, width, height, xdescent, ydescent,
loc, pad=0.4, borderpad=0.5, prop=None, frameon=True,
**kwargs):
"""
An anchored container with a fixed size and fillable DrawingArea.
Artists added to the *drawing_area* will have their coordinates
interpreted as pixels. Any transformations set on the artists will be
overridden.
Parameters
----------
width, height : int or float
width and height of the container, in pixels.
xdescent, ydescent : int or float
descent of the container in the x- and y- direction, in pixels.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.DrawingArea`
A container for artists to display.
Examples
--------
To display blue and red circles of different sizes in the upper right
of an axes *ax*:
>>> ada = AnchoredDrawingArea(20, 20, 0, 0, loc=1, frameon=False)
>>> ada.drawing_area.add_artist(Circle((10, 10), 10, fc="b"))
>>> ada.drawing_area.add_artist(Circle((30, 10), 5, fc="r"))
>>> ax.add_artist(ada)
"""
self.da = DrawingArea(width, height, xdescent, ydescent)
self.drawing_area = self.da
super(AnchoredDrawingArea, self).__init__(
loc, pad=pad, borderpad=borderpad, child=self.da, prop=None,
frameon=frameon, **kwargs
)
class AnchoredAuxTransformBox(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, loc,
pad=0.4, borderpad=0.5, prop=None, frameon=True, **kwargs):
"""
An anchored container with transformed coordinates.
Artists added to the *drawing_area* are scaled according to the
coordinates of the transformation used. The dimensions of this artist
will scale to contain the artists added.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
loc : int
Location of this artist. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the child objects, in fraction of the font
size. Defaults to 0.4.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.5.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
frameon : bool, optional
            If True, draw a box around this artist. Defaults to True.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
drawing_area : `matplotlib.offsetbox.AuxTransformBox`
A container for artists to display.
Examples
--------
To display an ellipse in the upper left, with a width of 0.1 and
height of 0.4 in data coordinates:
>>> box = AnchoredAuxTransformBox(ax.transData, loc=2)
>>> el = Ellipse((0,0), width=0.1, height=0.4, angle=30)
>>> box.drawing_area.add_artist(el)
>>> ax.add_artist(box)
"""
self.drawing_area = AuxTransformBox(transform)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self.drawing_area,
prop=prop,
frameon=frameon,
**kwargs)
class AnchoredEllipse(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, width, height, angle, loc,
pad=0.1, borderpad=0.1, prop=None, frameon=True, **kwargs):
"""
Draw an anchored ellipse of a given size.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
width, height : int or float
Width and height of the ellipse, given in coordinates of
*transform*.
angle : int or float
Rotation of the ellipse, in degrees, anti-clockwise.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the ellipse, in fraction of the font size. Defaults
to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size. Defaults to 0.1.
frameon : bool, optional
If True, draw a box around the ellipse. Defaults to True.
prop : `matplotlib.font_manager.FontProperties`, optional
Font property used as a reference for paddings.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
ellipse : `matplotlib.patches.Ellipse`
Ellipse patch drawn.
"""
self._box = AuxTransformBox(transform)
self.ellipse = Ellipse((0, 0), width, height, angle)
self._box.add_artist(self.ellipse)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=prop,
frameon=frameon, **kwargs)
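# Hedged usage sketch for AnchoredEllipse (the Axes *ax* and all values are
# assumptions): anchor an ellipse 0.2 wide and 0.1 tall, in data
# coordinates, in the lower-left corner (loc=3) of an existing Axes.
def _anchored_ellipse_example(ax):
    ae = AnchoredEllipse(ax.transData, width=0.2, height=0.1, angle=0,
                         loc=3, pad=0.5, borderpad=0.4, frameon=True)
    ax.add_artist(ae)
    return ae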
class AnchoredSizeBar(AnchoredOffsetbox):
@docstring.dedent
def __init__(self, transform, size, label, loc,
pad=0.1, borderpad=0.1, sep=2,
frameon=True, size_vertical=0, color='black',
label_top=False, fontproperties=None, fill_bar=None,
**kwargs):
"""
Draw a horizontal scale bar with a center-aligned label underneath.
Parameters
----------
transform : `matplotlib.transforms.Transform`
The transformation object for the coordinate system in use, i.e.,
:attr:`matplotlib.axes.Axes.transData`.
size : int or float
Horizontal length of the size bar, given in coordinates of
*transform*.
label : str
Label to display.
loc : int
Location of this size bar. Valid location codes are::
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10
pad : int or float, optional
Padding around the label and size bar, in fraction of the font
size. Defaults to 0.1.
borderpad : int or float, optional
Border padding, in fraction of the font size.
Defaults to 0.1.
sep : int or float, optional
            Separation between the label and the size bar, in points.
Defaults to 2.
frameon : bool, optional
If True, draw a box around the horizontal bar and label.
Defaults to True.
size_vertical : int or float, optional
Vertical length of the size bar, given in coordinates of
*transform*. Defaults to 0.
color : str, optional
Color for the size bar and label.
Defaults to black.
label_top : bool, optional
If True, the label will be over the size bar.
Defaults to False.
fontproperties : `matplotlib.font_manager.FontProperties`, optional
Font properties for the label text.
fill_bar : bool, optional
If True and if size_vertical is nonzero, the size bar will
            be filled in with the specified *color*.
Defaults to True if `size_vertical` is greater than
zero and False otherwise.
**kwargs :
Keyworded arguments to pass to
:class:`matplotlib.offsetbox.AnchoredOffsetbox`.
Attributes
----------
size_bar : `matplotlib.offsetbox.AuxTransformBox`
Container for the size bar.
txt_label : `matplotlib.offsetbox.TextArea`
Container for the label of the size bar.
Notes
-----
If *prop* is passed as a keyworded argument, but *fontproperties* is
        not, then *prop* is assumed to be the intended *fontproperties*.
Using both *prop* and *fontproperties* is not supported.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpl_toolkits.axes_grid1.anchored_artists import \
AnchoredSizeBar
>>> fig, ax = plt.subplots()
>>> ax.imshow(np.random.random((10,10)))
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 data units', 4)
>>> ax.add_artist(bar)
>>> fig.show()
Using all the optional parameters
>>> import matplotlib.font_manager as fm
>>> fontprops = fm.FontProperties(size=14, family='monospace')
>>> bar = AnchoredSizeBar(ax.transData, 3, '3 units', 4, pad=0.5, \
sep=5, borderpad=0.5, frameon=False, \
size_vertical=0.5, color='white', \
fontproperties=fontprops)
"""
if fill_bar is None:
fill_bar = size_vertical > 0
self.size_bar = AuxTransformBox(transform)
self.size_bar.add_artist(Rectangle((0, 0), size, size_vertical,
fill=fill_bar, facecolor=color,
edgecolor=color))
if fontproperties is None and 'prop' in kwargs:
fontproperties = kwargs.pop('prop')
if fontproperties is None:
textprops = {'color': color}
else:
textprops = {'color': color, 'fontproperties': fontproperties}
self.txt_label = TextArea(
label,
minimumdescent=False,
textprops=textprops)
if label_top:
_box_children = [self.txt_label, self.size_bar]
else:
_box_children = [self.size_bar, self.txt_label]
self._box = VPacker(children=_box_children,
align="center",
pad=0, sep=sep)
AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,
child=self._box,
prop=fontproperties,
frameon=frameon, **kwargs)
| {
"repo_name": "louisLouL/pair_trading",
"path": "capstone_env/lib/python3.6/site-packages/mpl_toolkits/axes_grid1/anchored_artists.py",
"copies": "2",
"size": "13214",
"license": "mit",
"hash": 3850674713766062000,
"line_mean": 34.1436170213,
"line_max": 78,
"alpha_frac": 0.5423792947,
"autogenerated": false,
"ratio": 4.394413036248753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 376
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import os
import re
import signal
import sys
from six import unichr
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, cursors)
import matplotlib.backends.qt_editor.figureoptions as figureoptions
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
from matplotlib.figure import Figure
from .qt_compat import (
QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'super', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('super', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
cursors.WAIT: QtCore.Qt.WaitCursor,
}
# make place holder
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if is_pyqt5():
try:
from PyQt5 import QtX11Extras
is_x11_build = True
except ImportError:
is_x11_build = False
else:
is_x11_build = hasattr(QtGui, "QX11Info")
if is_x11_build:
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([b"matplotlib"])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
if is_pyqt5():
try:
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
except AttributeError:
pass
def _allow_super_init(__init__):
"""
Decorator for ``__init__`` to allow ``super().__init__`` on PyQt4/PySide2.
"""
if QT_API == "PyQt5":
return __init__
else:
# To work around lack of cooperative inheritance in PyQt4, PySide,
# and PySide2, when calling FigureCanvasQT.__init__, we temporarily
# patch QWidget.__init__ by a cooperative version, that first calls
# QWidget.__init__ with no additional arguments, and then finds the
# next class in the MRO with an __init__ that does support cooperative
# inheritance (i.e., not defined by the PyQt4, PySide, PySide2, sip
# or Shiboken packages), and manually call its `__init__`, once again
# passing the additional arguments.
qwidget_init = QtWidgets.QWidget.__init__
def cooperative_qwidget_init(self, *args, **kwargs):
qwidget_init(self)
mro = type(self).__mro__
next_coop_init = next(
cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]
if cls.__module__.split(".")[0] not in [
"PyQt4", "sip", "PySide", "PySide2", "Shiboken"])
next_coop_init.__init__(self, *args, **kwargs)
@functools.wraps(__init__)
def wrapper(self, **kwargs):
try:
QtWidgets.QWidget.__init__ = cooperative_qwidget_init
__init__(self, **kwargs)
finally:
# Restore __init__
QtWidgets.QWidget.__init__ = qwidget_init
return wrapper
class TimerQT(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
def _update_figure_dpi(self):
dpi = self._dpi_ratio * self.figure._original_dpi
self.figure._set_dpi(dpi, forward=False)
@_allow_super_init
def __init__(self, figure):
_create_qApp()
figure._original_dpi = figure.dpi
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
self._update_figure_dpi()
w, h = self.get_width_height()
self.resize(w, h)
self.setMouseTracking(True)
# Key auto-repeat enabled by default
self._keyautorepeat = True
# In cases with mixed resolution displays, we need to be careful if the
# dpi_ratio changes - in this case we need to resize the canvas
# accordingly. We could watch for screenChanged events from Qt, but
# the issue is that we can't guarantee this will be emitted *before*
# the first paintEvent for the canvas, so instead we keep track of the
# dpi_ratio value here and in paintEvent we resize the canvas if
# needed.
self._dpi_ratio_prev = None
@property
def _dpi_ratio(self):
# Not available on Qt4 or some older Qt5.
try:
return self.devicePixelRatio()
except AttributeError:
return 1
def get_width_height(self):
w, h = FigureCanvasBase.get_width_height(self)
return int(w / self._dpi_ratio), int(h / self._dpi_ratio)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, guiEvent=event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, guiEvent=event)
def mouseEventCoords(self, pos):
"""Calculate mouse coordinates in physical pixels
Qt5 use logical pixels, but the figure is scaled to physical
pixels for rendering. Transform to physical pixels so that
all of the down-stream transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
dpi_ratio = self._dpi_ratio
x = pos.x()
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height / dpi_ratio - pos.y()
return x * dpi_ratio, y * dpi_ratio
def mousePressEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button,
guiEvent=event)
def mouseDoubleClickEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True,
guiEvent=event)
def mouseMoveEvent(self, event):
x, y = self.mouseEventCoords(event)
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def mouseReleaseEvent(self, event):
x, y = self.mouseEventCoords(event)
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button,
guiEvent=event)
def wheelEvent(self, event):
x, y = self.mouseEventCoords(event)
# from QWheelEvent::delta doc
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps:
FigureCanvasBase.scroll_event(self, x, y, steps, guiEvent=event)
def keyPressEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
@property
def keyAutoRepeat(self):
"""
If True, enable auto-repeat for key events.
"""
return self._keyautorepeat
@keyAutoRepeat.setter
def keyAutoRepeat(self, val):
self._keyautorepeat = bool(val)
def resizeEvent(self, event):
# _dpi_ratio_prev will be set the first time the canvas is painted, and
# the rendered buffer is useless before anyways.
if self._dpi_ratio_prev is None:
return
w = event.size().width() * self._dpi_ratio
h = event.size().height() * self._dpi_ratio
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# pass back into Qt to let it finish
QtWidgets.QWidget.resizeEvent(self, event)
# emit our resize events
FigureCanvasBase.resize_event(self)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
    def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if not self._keyautorepeat and event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
            # QT will use Key_Codes larger than that for keyboard keys that
            # are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
def flush_events(self):
global qApp
qApp.processEvents()
def start_event_loop(self, timeout=0):
if hasattr(self, "_event_loop") and self._event_loop.isRunning():
raise RuntimeError("Event loop already running")
self._event_loop = event_loop = QtCore.QEventLoop()
if timeout:
timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)
event_loop.exec_()
def stop_event_loop(self, event=None):
if hasattr(self, "_event_loop"):
self._event_loop.quit()
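# Hedged usage sketch (not part of the backend API proper): given an
# existing FigureCanvasQT instance, create a 500 ms repeating timer that
# redraws the figure on every tick. The interval is an arbitrary value.
def _canvas_timer_example(canvas):
    timer = canvas.new_timer(interval=500)
    timer.add_callback(canvas.draw_idle)
    timer.start()
    return timer   # keep a reference, or the timer may be garbage collected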
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : qt.QToolBar
The qt.QToolBar
window : qt.QMainWindow
The qt.QMainWindow
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.svg')
self.window.setWindowIcon(QtGui.QIcon(image))
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
        # will enable the canvas to process events w/o clicking.
        # ClickFocus only takes the focus if the window has been
# clicked
# on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
# add text label to status bar
self.statusbar_label = QtWidgets.QLabel()
self.window.statusBar().addWidget(self.statusbar_label)
self.toolbar = self._get_toolbar(self.canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self.statusbar_label.setText)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
self.window.activateWindow()
self.window.raise_()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
self.window.destroyed.connect(self._widgetclosed)
if self.toolbar:
self.toolbar.destroy()
self.window.close()
def get_window_title(self):
return six.text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
if is_pyqt5():
name = name.replace('.png', '_large.png')
pm = QtGui.QPixmap(os.path.join(self.basedir, name))
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if text == 'Subplots':
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit axis, curve and image parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
# Esthetic adjustments - we need to set these explicitly in PyQt5
# otherwise the layout looks different - but we don't want to set it if
# not using HiDPI icons otherwise they look worse than before.
if is_pyqt5():
self.setIconSize(QtCore.QSize(24, 24))
self.layout().setSpacing(12)
if is_pyqt5():
# For some reason, self.setMinimumHeight doesn't seem to carry over to
# the actual sizeHint, so override it instead in order to make the
# aesthetic adjustments noted above.
def sizeHint(self):
size = super(NavigationToolbar2QT, self).sizeHint()
size.setHeight(max(48, size.height()))
return size
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if not allaxes:
QtWidgets.QMessageBox.warning(
self.parent, "Error", "There are no axes to edit.")
return
elif len(allaxes) == 1:
axes, = allaxes
else:
titles = []
for axes in allaxes:
name = (axes.get_title() or
" - ".join(filter(None, [axes.get_xlabel(),
axes.get_ylabel()])) or
"<anonymous {} (id: {:#x})>".format(
type(axes).__name__, id(axes)))
titles.append(name)
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s)
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
# Save dir for next time, unless empty str (i.e., use cwd).
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self._figure = targetfig
for lower, higher in [("bottom", "top"), ("left", "right")]:
self._widgets[lower].valueChanged.connect(
lambda val: self._widgets[higher].setMinimum(val + .001))
self._widgets[higher].valueChanged.connect(
lambda val: self._widgets[lower].setMaximum(val - .001))
self._attrs = ["top", "bottom", "left", "right", "hspace", "wspace"]
self._defaults = {attr: vars(self._figure.subplotpars)[attr]
for attr in self._attrs}
# Set values after setting the range callbacks, but before setting up
# the redraw callbacks.
self._reset()
for attr in self._attrs:
self._widgets[attr].valueChanged.connect(self._on_value_changed)
for action, method in [("Export values", self._export_values),
("Tight layout", self._tight_layout),
("Reset", self._reset),
("Close", self.close)]:
self._widgets[action].clicked.connect(method)
def _export_values(self):
# Explicitly round to 3 decimals (which is also the spinbox precision)
# to avoid numbers of the form 0.100...001.
dialog = QtWidgets.QDialog()
layout = QtWidgets.QVBoxLayout()
dialog.setLayout(layout)
text = QtWidgets.QPlainTextEdit()
text.setReadOnly(True)
layout.addWidget(text)
text.setPlainText(
",\n".join("{}={:.3}".format(attr, self._widgets[attr].value())
for attr in self._attrs))
# Adjust the height of the text widget to fit the whole text, plus
# some padding.
size = text.maximumSize()
size.setHeight(
QtGui.QFontMetrics(text.document().defaultFont())
.size(0, text.toPlainText()).height() + 20)
text.setMaximumSize(size)
dialog.exec_()
def _on_value_changed(self):
self._figure.subplots_adjust(**{attr: self._widgets[attr].value()
for attr in self._attrs})
self._figure.canvas.draw_idle()
def _tight_layout(self):
self._figure.tight_layout()
for attr in self._attrs:
widget = self._widgets[attr]
widget.blockSignals(True)
widget.setValue(vars(self._figure.subplotpars)[attr])
widget.blockSignals(False)
self._figure.canvas.draw_idle()
def _reset(self):
for attr, value in self._defaults.items():
self._widgets[attr].setValue(value)
def error_msg_qt(msg, parent=None):
if not isinstance(msg, six.string_types):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
                                  msg, QtWidgets.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += six.text_type(value)
if len(msg):
error_msg_qt(msg)
@_Backend.export
class _BackendQT5(_Backend):
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@staticmethod
def mainloop():
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
global qApp
qApp.exec_()
| {
"repo_name": "louisLouL/pair_trading",
"path": "capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py",
"copies": "2",
"size": "31226",
"license": "mit",
"hash": 5466092491139902000,
"line_mean": 35.2250580046,
"line_max": 79,
"alpha_frac": 0.5811503234,
"autogenerated": false,
"ratio": 4.080763199163617,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5661913522563617,
"avg_score": null,
"num_lines": null
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
import h5py
import numpy as np
import os.path
import pims
from .handlers_base import HandlerBase
from .readers.spe import PrincetonSPEFile
logger = logging.getLogger(__name__)
class IntegrityError(Exception):
pass
class AreaDetectorSPEHandler(HandlerBase):
specs = {'AD_SPE'} | HandlerBase.specs
def __init__(self, fpath, template, filename,
frame_per_point=1):
self._path = fpath
self._fpp = frame_per_point
self._template = template
self._filename = filename
self._f_cache = dict()
def __call__(self, point_number):
if point_number not in self._f_cache:
fname = self._template % (self._path,
self._filename,
point_number)
spe_obj = PrincetonSPEFile(fname)
self._f_cache[point_number] = spe_obj
spe = self._f_cache[point_number]
data = spe.getData()
if data.shape[0] != self._fpp:
raise IntegrityError(
"expected {} frames, found {} frames".format(
self._fpp, data.shape[0]))
return data.squeeze()
class AreaDetectorTiffHandler(HandlerBase):
specs = {'AD_TIFF'} | HandlerBase.specs
def __init__(self, fpath, template, filename, frame_per_point=1):
self._path = fpath
self._fpp = frame_per_point
self._template = template.replace('_%6.6d', '*')
self._filename = self._template % (self._path,
filename)
self._image_sequence = pims.ImageSequence(self._filename)
def __call__(self, point_number):
start, stop = point_number * self._fpp, (point_number + 1) * self._fpp
if stop > len(self._image_sequence):
# if asking for an image past the end, make sure we have an up to
# date list of the existing files
self._image_sequence = pims.ImageSequence(self._filename)
if stop > len(self._image_sequence):
# if we _still_ don't have enough files, raise
raise IntegrityError("Seeking Frame {0} out of {1} frames.".format(
stop, len(self._image_sequence)))
return np.asarray(list(self._image_sequence[start:stop])).squeeze()
class DummyAreaDetectorHandler(HandlerBase):
def __init__(self, fpath, frame_per_point=1, **kwargs):
self._fpp = frame_per_point
def __call__(self, **kwargs):
out_stack = np.ones((self._fpp, 10, 10)) * np.nan
# return stacked and squeezed results
return out_stack.squeeze()
class _HDF5HandlerBase(HandlerBase):
def open(self):
if self._file:
return
self._file = h5py.File(self._filename, 'r')
def close(self):
super(_HDF5HandlerBase, self).close()
self._file.close()
self._file = None
class HDF5DatasetSliceHandler(_HDF5HandlerBase):
"""
Handler for data stored in one Dataset of an HDF5 file.
Parameters
----------
filename : string
path to HDF5 file
key : string
key of the single HDF5 Dataset used by this Handler
frame_per_point : integer, optional
number of frames to return as one datum, default 1
"""
def __init__(self, filename, key, frame_per_point=1):
self._fpp = frame_per_point
self._filename = filename
self._key = key
self._file = None
self._dataset = None
self.open()
def __call__(self, point_number):
# Don't read out the dataset until it is requested for the first time.
if not self._dataset:
self._dataset = self._file[self._key]
start, stop = point_number * self._fpp, (point_number + 1) * self._fpp
return self._dataset[start:stop].squeeze()
class AreaDetectorHDF5Handler(HDF5DatasetSliceHandler):
"""
Handler for the 'AD_HDF5' spec used by Area Detectors.
In this spec, the key (i.e., HDF5 dataset path) is always
'/entry/data/data'.
Parameters
----------
filename : string
path to HDF5 file
frame_per_point : integer, optional
number of frames to return as one datum, default 1
"""
specs = {'AD_HDF5'} | HDF5DatasetSliceHandler.specs
def __init__(self, filename, frame_per_point=1):
hardcoded_key = '/entry/data/data'
super(AreaDetectorHDF5Handler, self).__init__(
filename=filename, key=hardcoded_key,
frame_per_point=frame_per_point)
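# Hedged usage sketch: read the data for point 0 straight through the
# handler. The file path is an assumption; in normal use filestore builds
# handlers from resource/datum documents rather than by hand.
def _ad_hdf5_example(fpath):
    handler = AreaDetectorHDF5Handler(fpath, frame_per_point=1)
    first_point = handler(0)      # ndarray holding the frame(s) for point 0
    handler.close()
    return first_point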
class _HdfMapsHandlerBase(_HDF5HandlerBase):
"""
Reader for XRF data stored in hdf5 files.
The data set is assumed to be in a group called MAPS and stored
as a 3D array ordered [energy, x, y].
Parameters
----------
filename : str
Path to physical location of file
dset_path : str
The path to the dataset inside of 'MAPS'
"""
def __init__(self, filename, dset_path):
self._filename = filename
self._dset_path = dset_path
self._file = None
self._dset = None
self.open()
def open(self):
"""
Open the file for reading.
Provided as a stand alone function to allow re-opening of the handler
"""
if self._file:
return
self._file = h5py.File(self._filename, mode='r')
self._dset = self._file['/'.join(['MAPS', self._dset_path])]
def __call__(self):
if not self._file:
raise RuntimeError("File is not open")
class HDFMapsSpectrumHandler(_HdfMapsHandlerBase):
"""
Handler which selects energy spectrum from
a MAPS XRF data product.
"""
specs = {'MAPS_SPECTRUM'} | _HdfMapsHandlerBase.specs
def __call__(self, x, y):
"""
Return the spectrum at the x, y position
Parameters
----------
x : int
raster index in the x direction
y : int
raster index in the y direction
Returns
-------
spectrum : ndarray
The MCA channels
"""
super(HDFMapsSpectrumHandler, self).__call__()
return self._dset[:, x, y]
class HDFMapsEnergyHandler(_HdfMapsHandlerBase):
"""
Handler which select fixed-energy slices from
a MAPS XRF data file.
"""
specs = {'MAPS_PLANE'} | _HdfMapsHandlerBase.specs
def __call__(self, e_index):
"""
Return the raster plane at a fixed energy
Parameters
----------
e_index : int
            The index of the energy
Returns
-------
plane : ndarray
The raster image at a fixed energy.
"""
super(HDFMapsEnergyHandler, self).__call__()
return self._dset[e_index, :, :]
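# Hedged usage sketch for the MAPS handlers: the file path and the
# 'mca_arr' dataset path are assumptions about the file layout, and the
# indices are arbitrary.
def _maps_handlers_example(fpath):
    spectrum = HDFMapsSpectrumHandler(fpath, dset_path='mca_arr')(x=0, y=0)
    plane = HDFMapsEnergyHandler(fpath, dset_path='mca_arr')(e_index=100)
    return spectrum.shape, plane.shape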
class NpyHandler(HandlerBase):
"""
Class to deal with reading npy files
Parameters
----------
fpath : str
Path to file
mmap_mode : {'r', 'r+', c}, optional
memmap mode to use to open file
"""
specs = {'npy'} | HandlerBase.specs
def __init__(self, filename, mmap_mode=None):
self._mmap_mode = mmap_mode
if not os.path.exists(filename):
raise IOError("the requested file {fpath} does not exst")
self._fpath = filename
def __call__(self):
return np.load(self._fpath, self._mmap_mode)
class NpyFrameWise(HandlerBase):
specs = {'npy_FRAMEWISE'} | HandlerBase.specs
def __init__(self, filename, mmap_mode=None):
self._mmap_mode = mmap_mode
if not os.path.exists(filename):
raise IOError("the requested file {fpath} does not exst")
self._fpath = filename
self._data = np.load(self._fpath, self._mmap_mode)
def __call__(self, frame_no):
return self._data[frame_no]
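# Hedged usage sketch: the path is an assumption. NpyHandler returns the
# whole array; NpyFrameWise indexes a single frame out of a stacked array.
def _npy_handlers_example(fpath):
    whole_array = NpyHandler(fpath)()
    first_frame = NpyFrameWise(fpath)(0)
    return whole_array.shape, first_frame.shape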
| {
"repo_name": "danielballan/filestore",
"path": "filestore/handlers.py",
"copies": "1",
"size": "7964",
"license": "bsd-3-clause",
"hash": 701646722810085000,
"line_mean": 27.8550724638,
"line_max": 79,
"alpha_frac": 0.5777247614,
"autogenerated": false,
"ratio": 4.002010050251256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 276
} |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import logging
import h5py
import numpy as np
import os.path
import tifffile
from .handlers_base import HandlerBase
from .readers.spe import PrincetonSPEFile
logger = logging.getLogger(__name__)
from pims import FramesSequence, Frame
# The ImageCube class is used for a per event representation of
# a dataset
class ImageStack(FramesSequence):
"One of these represents the data from an event: (num_images x w x h)"
def __init__(self, dataset, start, stop):
# `start` and `stop` are the limits of this cube
# i indexes within the cube
self._start = start
self._stop = stop
self._dataset = dataset
# work around inconsistent naming choices in databroker's Image object
self.dtype = self.pixel_type
self.shape = self.frame_shape
def get_frame(self, i):
return Frame(self._dataset[self._start + i], frame_no=i)
def __len__(self):
return self._stop - self._start
@property
def pixel_type(self):
return self._dataset.dtype
@property
def frame_shape(self):
return self._dataset.shape[1:]
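# Hedged usage sketch: ``dataset`` stands in for an already-open h5py
# dataset (or any (N, h, w) array-like); the slice bounds are arbitrary.
# Frames are read lazily, one at a time, when indexed.
def _image_stack_example(dataset):
    stack = ImageStack(dataset, start=0, stop=10)
    frame0 = stack[0]          # a pims Frame carrying its frame_no
    return len(stack), frame0.shape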
class IntegrityError(Exception):
pass
class AreaDetectorSPEHandler(HandlerBase):
specs = {'AD_SPE'} | HandlerBase.specs
def __init__(self, fpath, template, filename,
frame_per_point=1):
self._path = fpath
self._fpp = frame_per_point
self._template = template
self._filename = filename
self._f_cache = dict()
def __call__(self, point_number):
if point_number not in self._f_cache:
fname = self._template % (self._path,
self._filename,
point_number)
spe_obj = PrincetonSPEFile(fname)
self._f_cache[point_number] = spe_obj
spe = self._f_cache[point_number]
data = spe.getData()
if data.shape[0] != self._fpp:
raise IntegrityError("expected {} frames, found {} frames".format(
self._fpp, data.shape[0]))
return data.squeeze()
def get_file_list(self, datum_kwarg_gen):
return [self._template % (self._path,
self._filename,
d['point_number'])
for d in datum_kwarg_gen]
class AreaDetectorTiffHandler(HandlerBase):
specs = {'AD_TIFF'} | HandlerBase.specs
def __init__(self, fpath, template, filename, frame_per_point=1):
self._path = fpath
self._fpp = frame_per_point
self._template = template
self._filename = filename
def _fnames_for_point(self, point_number):
start, stop = point_number * self._fpp, (point_number + 1) * self._fpp
for j in range(start, stop):
yield self._template % (self._path, self._filename, j)
def __call__(self, point_number):
ret = []
for fn in self._fnames_for_point(point_number):
with tifffile.TiffFile(fn) as tif:
ret.append(tif.asarray())
return np.array(ret).squeeze()
def get_file_list(self, datum_kwargs):
ret = []
for d_kw in datum_kwargs:
ret.extend(self._fnames_for_point(**d_kw))
return ret
class DummyAreaDetectorHandler(HandlerBase):
def __init__(self, fpath, frame_per_point=1, **kwargs):
self._fpp = frame_per_point
def __call__(self, **kwargs):
out_stack = np.ones((self._fpp, 10, 10)) * np.nan
# return stacked and squeezed results
return out_stack.squeeze()
class HDF5DatasetSliceHandler(HandlerBase):
"""
Handler for data stored in one Dataset of an HDF5 file.
Parameters
----------
filename : string
path to HDF5 file
key : string
key of the single HDF5 Dataset used by this Handler
frame_per_point : integer, optional
number of frames to return as one datum, default 1
"""
def __init__(self, filename, key, frame_per_point=1):
self._fpp = frame_per_point
self._filename = filename
self._key = key
self._file = None
self._dataset = None
self._data_objects = {}
self.open()
def get_file_list(self, datum_kwarg_gen):
return [self._filename]
def __call__(self, point_number):
# Don't read out the dataset until it is requested for the first time.
if not self._dataset:
self._dataset = self._file[self._key]
if point_number not in self._data_objects:
start = point_number * self._fpp
stop = (point_number + 1) * self._fpp
self._data_objects[point_number] = ImageStack(self._dataset,
start, stop)
return self._data_objects[point_number]
def open(self):
if self._file:
return
self._file = h5py.File(self._filename, 'r')
def close(self):
super(HDF5DatasetSliceHandler, self).close()
self._file.close()
self._file = None
class AreaDetectorHDF5Handler(HDF5DatasetSliceHandler):
"""
Handler for the 'AD_HDF5' spec used by Area Detectors.
In this spec, the key (i.e., HDF5 dataset path) is always
'/entry/data/data'.
Parameters
----------
filename : string
path to HDF5 file
frame_per_point : integer, optional
number of frames to return as one datum, default 1
"""
specs = {'AD_HDF5'} | HDF5DatasetSliceHandler.specs
def __init__(self, filename, frame_per_point=1):
hardcoded_key = '/entry/data/data'
super(AreaDetectorHDF5Handler, self).__init__(
filename=filename, key=hardcoded_key,
frame_per_point=frame_per_point)
class AreaDetectorHDF5SWMRHandler(AreaDetectorHDF5Handler):
"""
Handler for the 'AD_HDF5_SWMR' spec used by Area Detectors.
In this spec, the key (i.e., HDF5 dataset path) is always
'/entry/data/data'.
Parameters
----------
filename : string
path to HDF5 file
frame_per_point : integer, optional
number of frames to return as one datum, default 1
"""
specs = {'AD_HDF5_SWMR'} | HDF5DatasetSliceHandler.specs
def open(self):
if self._file:
return
self._file = h5py.File(self._filename, 'r', swmr=True)
def __call__(self, point_number):
if self._dataset is not None:
self._dataset.id.refresh()
rtn = super(AreaDetectorHDF5SWMRHandler, self).__call__(
point_number)
return rtn
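# Illustrative sketch (not part of the original module): with the SWMR
# handler the detector can still be appending to the file while we read,
# and each call refreshes the dataset so newly written frames become
# visible.  The path is a hypothetical placeholder.
def _example_ad_hdf5_swmr_usage():
    handler = AreaDetectorHDF5SWMRHandler('/tmp/detector/live_scan.h5',
                                          frame_per_point=1)
    first = handler(point_number=0)   # whatever is on disk right now
    latest = handler(point_number=7)  # refreshed view may see new frames
    handler.close()
    return first, latest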
class AreaDetectorHDF5TimestampHandler(HandlerBase):
""" Handler to retrieve timestamps from Areadetector HDF5 File
In this spec, the timestamps of the images are read.
Parameters
----------
filename : string
path to HDF5 file
frame_per_point : integer, optional
number of frames to return as one datum, default 1
"""
specs = {'AD_HDF5_TS'} | HandlerBase.specs
def __init__(self, filename, frame_per_point=1):
self._fpp = frame_per_point
self._filename = filename
self._key = ['/entry/instrument/NDAttributes/NDArrayEpicsTSSec',
'/entry/instrument/NDAttributes/NDArrayEpicsTSnSec']
self._file = None
self._dataset1 = None
self._dataset2 = None
self.open()
def __call__(self, point_number):
# Don't read out the dataset until it is requested for the first time.
if not self._dataset1:
self._dataset1 = self._file[self._key[0]]
if not self._dataset2:
self._dataset2 = self._file[self._key[1]]
start, stop = point_number * self._fpp, (point_number + 1) * self._fpp
rtn = self._dataset1[start:stop].squeeze()
rtn = rtn + (self._dataset2[start:stop].squeeze() * 1e-9)
return rtn
def open(self):
if self._file:
return
self._file = h5py.File(self._filename, 'r')
def close(self):
super(AreaDetectorHDF5TimestampHandler, self).close()
self._file.close()
self._file = None
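# Illustrative sketch (not part of the original module): the timestamp
# handler combines the EPICS seconds and nanoseconds datasets into float
# seconds, i.e. t = sec + nsec * 1e-9, one value per frame in the point.
# The path is a hypothetical placeholder.
def _example_ad_hdf5_timestamp_usage():
    handler = AreaDetectorHDF5TimestampHandler('/tmp/detector/scan42.h5',
                                               frame_per_point=2)
    timestamps = handler(point_number=0)
    handler.close()
    return timestamps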
class AreaDetectorHDF5SWMRTimestampHandler(AreaDetectorHDF5TimestampHandler):
""" Handler to retrieve timestamps from Areadetector HDF5 File
In this spec, the timestamps of the images are read. Reading
is done using SWMR option to allow read during processing
Parameters
----------
filename : string
path to HDF5 file
frame_per_point : integer, optional
number of frames to return as one datum, default 1
"""
specs = {'AD_HDF5_SWMR_TS'} | HandlerBase.specs
def open(self):
if self._file:
return
self._file = h5py.File(self._filename, 'r', swmr=True)
def __call__(self, point_number):
if (self._dataset1 is not None) and (self._dataset2 is not None):
            self._dataset1.id.refresh()
            self._dataset2.id.refresh()
rtn = super(AreaDetectorHDF5SWMRTimestampHandler, self).__call__(
point_number)
return rtn
class _HdfMapsHandlerBase(HDF5DatasetSliceHandler):
"""
Reader for XRF data stored in hdf5 files.
The data set is assumed to be in a group called MAPS and stored
as a 3D array ordered [energy, x, y].
Parameters
----------
filename : str
Path to physical location of file
dset_path : str
The path to the dataset inside of 'MAPS'
"""
def __init__(self, filename, dset_path):
self._filename = filename
self._dset_path = dset_path
self._file = None
self._dset = None
self._swmr = False
self.open()
def open(self):
"""
Open the file for reading.
        Provided as a stand-alone method to allow re-opening of the handler.
"""
super(_HdfMapsHandlerBase, self).open()
self._dset = self._file['/'.join(['MAPS', self._dset_path])]
def __call__(self):
if not self._file:
raise RuntimeError("File is not open")
if self._swmr:
            self._dset.id.refresh()
class HDFMapsSpectrumHandler(_HdfMapsHandlerBase):
"""
Handler which selects energy spectrum from
a MAPS XRF data product.
"""
specs = {'MAPS_SPECTRUM'} | _HdfMapsHandlerBase.specs
def __call__(self, x, y):
"""
Return the spectrum at the x, y position
Parameters
----------
x : int
raster index in the x direction
y : int
raster index in the y direction
Returns
-------
spectrum : ndarray
The MCA channels
"""
super(HDFMapsSpectrumHandler, self).__call__()
return self._dset[:, x, y]
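# Illustrative sketch (not part of the original module): pulling the full
# MCA spectrum recorded at one raster position of a MAPS XRF map.  Both
# the file path and the dataset path under 'MAPS' are hypothetical
# examples.
def _example_maps_spectrum_usage():
    handler = HDFMapsSpectrumHandler('/tmp/xrf/map_001.h5',
                                     dset_path='mca_arr')
    spectrum = handler(x=10, y=20)  # 1-D array over the energy channels
    handler.close()
    return spectrum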
class HDFMapsEnergyHandler(_HdfMapsHandlerBase):
"""
    Handler which selects fixed-energy slices from
a MAPS XRF data file.
"""
specs = {'MAPS_PLANE'} | _HdfMapsHandlerBase.specs
def __call__(self, e_index):
"""
Return the raster plane at a fixed energy
Parameters
----------
e_index : int
            The index of the energy
Returns
-------
plane : ndarray
The raster image at a fixed energy.
"""
super(HDFMapsEnergyHandler, self).__call__()
return self._dset[e_index, :, :]
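# Illustrative sketch (not part of the original module): extracting the
# raster image at a single energy channel from the same kind of MAPS
# file.  File path and dataset path are hypothetical examples.
def _example_maps_plane_usage():
    handler = HDFMapsEnergyHandler('/tmp/xrf/map_001.h5',
                                   dset_path='mca_arr')
    plane = handler(e_index=150)  # 2-D (x, y) image at channel 150
    handler.close()
    return plane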
class NpyHandler(HandlerBase):
"""
Class to deal with reading npy files
Parameters
----------
fpath : str
Path to file
    mmap_mode : {'r', 'r+', 'c'}, optional
memmap mode to use to open file
"""
specs = {'npy'} | HandlerBase.specs
def __init__(self, filename, mmap_mode=None):
self._mmap_mode = mmap_mode
if not os.path.exists(filename):
            raise IOError("the requested file {} does not "
                          "exist".format(filename))
self._fpath = filename
def __call__(self):
return np.load(self._fpath, self._mmap_mode)
def get_file_list(self, datum_kwarg_gen):
return [self._fpath]
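# Illustrative sketch (not part of the original module): the npy handler
# simply loads the whole array, optionally memory-mapped.  The path is a
# hypothetical placeholder.
def _example_npy_usage():
    handler = NpyHandler('/tmp/arrays/dark_frame.npy', mmap_mode='r')
    arr = handler()  # the full array, memory-mapped read-only here
    return arr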
class NpyFrameWise(HandlerBase):
specs = {'npy_FRAMEWISE'} | HandlerBase.specs
def __init__(self, filename, mmap_mode=None):
self._mmap_mode = mmap_mode
if not os.path.exists(filename):
            raise IOError("the requested file {} does not "
                          "exist".format(filename))
self._fpath = filename
self._data = np.load(self._fpath, self._mmap_mode)
def __call__(self, frame_no):
return self._data[frame_no]
def get_file_list(self, datum_kwarg_gen):
return [self._fpath]
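# Illustrative sketch (not part of the original module): the frame-wise
# variant loads the array once and hands back a single frame per call.
# The path is a hypothetical placeholder.
def _example_npy_framewise_usage():
    handler = NpyFrameWise('/tmp/arrays/image_series.npy')
    frame = handler(frame_no=3)  # one frame from the stack
    return frame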
| {
"repo_name": "tacaswell/filestore",
"path": "filestore/handlers.py",
"copies": "2",
"size": "12679",
"license": "bsd-3-clause",
"hash": 2242047748813485600,
"line_mean": 27.9474885845,
"line_max": 78,
"alpha_frac": 0.5855351368,
"autogenerated": false,
"ratio": 3.964665415884928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00005854115443156539,
"num_lines": 438
} |